diff --git a/.github/workflows/ansible-deploy-bonus.yml b/.github/workflows/ansible-deploy-bonus.yml new file mode 100644 index 0000000000..7cdb394b79 --- /dev/null +++ b/.github/workflows/ansible-deploy-bonus.yml @@ -0,0 +1,84 @@ +name: Ansible Deployment (Go Bonus) + +on: + push: + branches: [master, lab06] + paths: + - 'ansible/vars/app_bonus.yml' + - 'ansible/playbooks/deploy_bonus.yml' + - 'ansible/roles/web_app/**' + - '.github/workflows/ansible-deploy-bonus.yml' + pull_request: + branches: [master] + paths: + - 'ansible/vars/app_bonus.yml' + - 'ansible/playbooks/deploy_bonus.yml' + - 'ansible/roles/web_app/**' + +concurrency: + group: ansible-deploy-bonus-${{ github.ref }} + cancel-in-progress: true + +jobs: + lint: + name: Ansible Lint + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install dependencies + run: pip install ansible ansible-lint + + - name: Create dummy vault password for lint + run: echo "lint-dummy" > ansible/.vault_pass + + - name: Run ansible-lint + run: | + cd ansible + ansible-lint playbooks/deploy_bonus.yml roles/web_app/tasks/*.yml + + deploy: + name: Deploy Go Bonus Application + needs: lint + runs-on: ubuntu-latest + if: github.event_name == 'push' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install Ansible + run: pip install ansible + + - name: Setup SSH + run: | + mkdir -p ~/.ssh + echo "${{ secrets.SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa + chmod 600 ~/.ssh/id_rsa + ssh-keyscan -H ${{ secrets.VM_HOST }} >> ~/.ssh/known_hosts + + - name: Deploy with Ansible + run: | + cd ansible + echo "${{ secrets.ANSIBLE_VAULT_PASSWORD }}" > /tmp/vault_pass + ansible-playbook playbooks/deploy_bonus.yml \ + -i inventory/hosts.ini \ + --vault-password-file /tmp/vault_pass + rm -f 
/tmp/vault_pass + + - name: Verify Deployment + run: | + ssh -o StrictHostKeyChecking=no ${{ secrets.VM_USER }}@${{ secrets.VM_HOST }} \ + "curl -sf http://localhost:8001/health" diff --git a/.github/workflows/ansible-deploy.yml b/.github/workflows/ansible-deploy.yml new file mode 100644 index 0000000000..3cf472d1a0 --- /dev/null +++ b/.github/workflows/ansible-deploy.yml @@ -0,0 +1,82 @@ +name: Ansible Deployment (Python) + +on: + push: + branches: [master, lab06] + paths: + - 'ansible/**' + - '!ansible/docs/**' + - '!ansible/vars/app_bonus.yml' + - '.github/workflows/ansible-deploy.yml' + pull_request: + branches: [master] + paths: + - 'ansible/**' + +concurrency: + group: ansible-deploy-${{ github.ref }} + cancel-in-progress: true + +jobs: + lint: + name: Ansible Lint + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install dependencies + run: pip install ansible ansible-lint + + - name: Create dummy vault password for lint + run: echo "lint-dummy" > ansible/.vault_pass + + - name: Run ansible-lint + run: | + cd ansible + ansible-lint playbooks/*.yml roles/*/tasks/*.yml + + deploy: + name: Deploy Python Application + needs: lint + runs-on: ubuntu-latest + if: github.event_name == 'push' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install Ansible + run: pip install ansible + + - name: Setup SSH + run: | + mkdir -p ~/.ssh + echo "${{ secrets.SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa + chmod 600 ~/.ssh/id_rsa + ssh-keyscan -H ${{ secrets.VM_HOST }} >> ~/.ssh/known_hosts + + - name: Deploy with Ansible + run: | + cd ansible + echo "${{ secrets.ANSIBLE_VAULT_PASSWORD }}" > /tmp/vault_pass + ansible-playbook playbooks/deploy_python.yml \ + -i inventory/hosts.ini \ + --vault-password-file /tmp/vault_pass + rm -f 
/tmp/vault_pass + + - name: Verify Deployment + run: | + ssh -o StrictHostKeyChecking=no ${{ secrets.VM_USER }}@${{ secrets.VM_HOST }} \ + "curl -sf http://localhost:8000/health" diff --git a/.github/workflows/go-ci.yml b/.github/workflows/go-ci.yml new file mode 100644 index 0000000000..56fa4fee44 --- /dev/null +++ b/.github/workflows/go-ci.yml @@ -0,0 +1,116 @@ +name: Go CI + +on: + push: + branches: [master, lab03] + paths: + - 'app_go/**' + - '.github/workflows/go-ci.yml' + pull_request: + branches: [master] + paths: + - 'app_go/**' + - '.github/workflows/go-ci.yml' + +# Cancel in-progress runs on same branch +concurrency: + group: go-ci-${{ github.ref }} + cancel-in-progress: true + +env: + DOCKER_IMAGE: aezuraa/devops-info-service + GO_VERSION: '1.23' + +jobs: + lint-and-test: + name: Lint & Test + runs-on: ubuntu-latest + defaults: + run: + working-directory: app_go + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go ${{ env.GO_VERSION }} + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache-dependency-path: app_go/go.mod + + - name: Run golangci-lint + uses: golangci/golangci-lint-action@v6 + with: + working-directory: app_go + + - name: Run tests with coverage + run: | + go test -v -coverprofile=coverage.out ./... 
+ go tool cover -func=coverage.out + + - name: Fix coverage paths for Codecov + run: sed -i 's|devops-info-service/|app_go/|g' coverage.out + + - name: Upload coverage to Codecov + if: github.event_name == 'push' + uses: codecov/codecov-action@v4 + with: + file: app_go/coverage.out + flags: go + token: ${{ secrets.CODECOV_TOKEN }} + continue-on-error: true + + docker: + name: Docker Build & Push + runs-on: ubuntu-latest + needs: lint-and-test + if: github.event_name == 'push' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Generate CalVer version + id: version + run: | + echo "calver=$(date +'%Y.%m.%d')" >> $GITHUB_OUTPUT + echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: app_go + push: true + tags: | + ${{ env.DOCKER_IMAGE }}:go + ${{ env.DOCKER_IMAGE }}:go-${{ steps.version.outputs.calver }} + ${{ env.DOCKER_IMAGE }}:go-${{ steps.version.outputs.sha_short }} + cache-from: type=gha + cache-to: type=gha,mode=max + + snyk: + name: Snyk Security Scan + runs-on: ubuntu-latest + needs: lint-and-test + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Snyk to check for vulnerabilities + uses: snyk/actions/golang@master + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + with: + args: --file=app_go/go.mod --severity-threshold=high + continue-on-error: true diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml new file mode 100644 index 0000000000..e6eec4997c --- /dev/null +++ b/.github/workflows/python-ci.yml @@ -0,0 +1,122 @@ +name: Python CI + +on: + push: + branches: [master, lab03] + paths: + - 'app_python/**' + - '.github/workflows/python-ci.yml' + 
pull_request: + branches: [master] + paths: + - 'app_python/**' + - '.github/workflows/python-ci.yml' + +# Cancel in-progress runs on same branch +concurrency: + group: python-ci-${{ github.ref }} + cancel-in-progress: true + +env: + DOCKER_IMAGE: aezuraa/devops-info-service + PYTHON_VERSION: '3.12' + +jobs: + lint-and-test: + name: Lint & Test + runs-on: ubuntu-latest + defaults: + run: + working-directory: app_python + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + cache: 'pip' + cache-dependency-path: app_python/requirements-dev.txt + + - name: Install dependencies + run: pip install -r requirements-dev.txt + + - name: Run flake8 linter + run: flake8 app.py --max-line-length=120 + + - name: Run unit tests with coverage + run: pytest tests/ -v --cov=. --cov-report=term-missing --cov-report=xml --cov-fail-under=70 + + - name: Upload coverage to Codecov + if: github.event_name == 'push' + uses: codecov/codecov-action@v4 + with: + file: app_python/coverage.xml + flags: python + token: ${{ secrets.CODECOV_TOKEN }} + continue-on-error: true + + docker: + name: Docker Build & Push + runs-on: ubuntu-latest + needs: lint-and-test + if: github.event_name == 'push' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Generate CalVer version + id: version + run: | + echo "calver=$(date +'%Y.%m.%d')" >> $GITHUB_OUTPUT + echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: app_python + push: true + tags: | + ${{ env.DOCKER_IMAGE }}:python + ${{ env.DOCKER_IMAGE 
}}:python-${{ steps.version.outputs.calver }} + ${{ env.DOCKER_IMAGE }}:python-${{ steps.version.outputs.sha_short }} + cache-from: type=gha + cache-to: type=gha,mode=max + + snyk: + name: Snyk Security Scan + runs-on: ubuntu-latest + needs: lint-and-test + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install dependencies + run: pip install -r app_python/requirements.txt + + - name: Install Snyk CLI + run: npm install -g snyk + + - name: Run Snyk to check for vulnerabilities + run: snyk test --file=app_python/requirements.txt --severity-threshold=high --package-manager=pip + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + continue-on-error: true diff --git a/.github/workflows/terraform-ci.yml b/.github/workflows/terraform-ci.yml new file mode 100644 index 0000000000..922445ee57 --- /dev/null +++ b/.github/workflows/terraform-ci.yml @@ -0,0 +1,54 @@ +name: Terraform CI + +on: + push: + branches: [master, lab04] + paths: + - 'terraform/**' + - '.github/workflows/terraform-ci.yml' + pull_request: + branches: [master] + paths: + - 'terraform/**' + - '.github/workflows/terraform-ci.yml' + +concurrency: + group: terraform-ci-${{ github.ref }} + cancel-in-progress: true + +jobs: + validate: + name: Validate Terraform + runs-on: ubuntu-latest + defaults: + run: + working-directory: terraform + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: "1.5.7" + + - name: Check formatting + run: terraform fmt -check -recursive + + - name: Initialize Terraform + run: terraform init -backend=false + + - name: Validate configuration + run: terraform validate + + - name: Setup TFLint + uses: terraform-linters/setup-tflint@v3 + with: + tflint_version: latest + + - name: Initialize TFLint + run: tflint --init + + - name: Run TFLint + run: tflint 
--format compact diff --git a/.gitignore b/.gitignore index 30d74d2584..8a789e4f9b 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,37 @@ -test \ No newline at end of file +test + +# Terraform +*.tfstate +*.tfstate.* +.terraform/ +.terraform.lock.hcl +terraform.tfvars +crash.log + +# Pulumi +pulumi/venv/ +Pulumi.*.yaml + +# Credentials +*.pem +*.key +.env +monitoring/.env + +# Ansible +*.retry +.vault_pass +ansible/.vault_pass +ansible/inventory/*.pyc +__pycache__/ + +# Python +*.pyc +*.pyo +venv/ + +# Local course secrets (do not commit) +k8s/COURSE_CREDENTIALS.local.md + +# macOS +.DS_Store \ No newline at end of file diff --git a/WORKERS.md b/WORKERS.md new file mode 100644 index 0000000000..7fd501a409 --- /dev/null +++ b/WORKERS.md @@ -0,0 +1,392 @@ +# Lab 17 — Cloudflare Workers Edge Deployment + +A serverless HTTP API deployed on Cloudflare's global edge network. The Worker +lives in [`edge-api/`](edge-api/) (TypeScript, scaffolded with Cloudflare's C3 +template `Worker only` + TypeScript), and is operated through Wrangler CLI. + +This is an **exam-alternative lab** (paired with [Lab 18 — Reproducible Builds +with Nix](labs/lab18.md), 20 + 20 = 40 pts replacing a 40 pt final exam). + +--- + +## 1. 
Deployment Summary + +| Field | Value | +|---|---| +| **Worker name** | `edge-api` | +| **Public URL** | `https://edge-api.aezuraa.workers.dev` | +| **Source** | [`edge-api/src/index.ts`](edge-api/src/index.ts) | +| **Config** | [`edge-api/wrangler.jsonc`](edge-api/wrangler.jsonc) | +| **Compatibility date** | `2026-05-10` (with `nodejs_compat`) | +| **Observability** | enabled — Workers Logs retain 24 h of `console.log()` | +| **Region model** | global by default — no region picker, code runs at the closest colo | + +### 1.1 Routes + +| Route | Purpose | Reads | +|---|---|---| +| `GET /` | Service metadata, uptime, route list | `vars.APP_NAME`, `vars.COURSE_NAME` | +| `GET /health` | Liveness probe | — | +| `GET /edge` | `request.cf` metadata (Task 3) | edge-side fields populated by Cloudflare | +| `GET /counter` | KV-backed visit counter (Task 4) | `SETTINGS` (KV namespace), key `visits` | +| `GET /whoami` | Redacted view of admin credentials (Task 4) | `secret.API_TOKEN`, `secret.ADMIN_EMAIL` | +| `*` (404) | Explicit unknown-route JSON with the list of known paths | — | + +### 1.2 Bindings (configured in [`wrangler.jsonc`](edge-api/wrangler.jsonc)) + +```jsonc +"vars": { + "APP_NAME": "edge-api", + "COURSE_NAME": "devops-core" +}, +"kv_namespaces": [ + { "binding": "SETTINGS", "id": "d8acf08371ae47c0b0c848b0a0bbf0e2" } +] +// Secrets API_TOKEN and ADMIN_EMAIL are set via `wrangler secret put` — +// never committed to wrangler.jsonc. +``` + +--- + +## 2. Setup & Deployment Workflow + +### 2.1 Authenticate Wrangler + +```bash +cd edge-api +npx wrangler login # opens a browser for OAuth +npx wrangler whoami # confirm account / email +``` + +### 2.2 Create the KV namespace + +```bash +npx wrangler kv namespace create SETTINGS +# → output: +# 🌀 Creating namespace with title "edge-api-SETTINGS" +# ✅ Success! +# Add the following to your configuration file in your kv_namespaces array: +# [[kv_namespaces]] +# binding = "SETTINGS" +# id = "abcdef0123456789..." 
+``` + +The returned `id` was pasted into [`wrangler.jsonc`](edge-api/wrangler.jsonc) +under `kv_namespaces[0].id`. + +### 2.3 Set the secrets + +```bash +npx wrangler secret put API_TOKEN +# stdin: paste the token (random hex), hit enter +npx wrangler secret put ADMIN_EMAIL +# stdin: +``` + +Secrets do not appear in `wrangler.jsonc` and are not in Git — they are +encrypted by Cloudflare and only available at runtime as `env.API_TOKEN` / +`env.ADMIN_EMAIL`. The `/whoami` endpoint reads them but returns only the +last 4 characters of the token and an obfuscated email. + +### 2.4 Deploy + +```bash +npx wrangler deploy +# → 🌀 Building list of assets... +# ✅ Deployed edge-api triggers +# https://edge-api.aezuraa.workers.dev +``` + +A second deploy follows after a minor source change to populate the +**Versions & Deployments** history (Task 5). + +--- + +## 3. Evidence + +### 3.1 `/edge` JSON from the public URL (Task 3) + +```text +$ curl -s https://edge-api.aezuraa.workers.dev/edge | jq . +{ + "colo": "ARN", + "country": "FI", + "city": "Helsinki", + "region": "Uusimaa", + "asn": 56971, + "asOrganization": "CGI GLOBAL LIMITED", + "httpProtocol": "HTTP/2", + "tlsVersion": "TLSv1.3", + "timezone": "Europe/Helsinki", + "clientTcpRtt": 48, + "note": "Fields are populated by Cloudflare's edge runtime. ..." +} +``` + +`colo: ARN` is Cloudflare's Stockholm point of presence, the closest one to +the VPN exit (`asOrganization: CGI GLOBAL LIMITED`). `httpProtocol: HTTP/2` +confirms this is the real edge — the local `wrangler dev` proxy returned +`HTTP/1.1` for the same request. `clientTcpRtt: 48 ms` reflects the +end-to-end network latency between the VPN exit and the colo. None of these +fields were chosen by us — Cloudflare picks the colo automatically based on +anycast routing. 
+ +![Cloudflare dashboard — Worker overview](screenshots/lab17/01-dashboard-worker.png) + +![Public URL responding](screenshots/lab17/02-public-url-curl.png) + +### 3.2 KV persistence (Task 4) + +```text +$ curl -s https://edge-api.aezuraa.workers.dev/counter | jq .visits # 1 +$ curl -s https://edge-api.aezuraa.workers.dev/counter | jq .visits # 2 +$ curl -s https://edge-api.aezuraa.workers.dev/counter | jq .visits # 3 +$ npx wrangler deploy # v2 deploy +$ curl -s https://edge-api.aezuraa.workers.dev/counter | jq .visits # 4 ← survived redeploy +$ curl -s https://edge-api.aezuraa.workers.dev/counter | jq .visits # 5 (via wrangler tail burst) +$ curl -s https://edge-api.aezuraa.workers.dev/counter | jq .visits # 6 (via wrangler tail burst) +``` + +The visit counter sequence `1 → 2 → 3 → [redeploy] → 4 → 5 → 6` is direct +evidence of KV durability across both deployments — the running code was +replaced but the stored value in the SETTINGS namespace was not. + +![KV namespace with `visits` key](screenshots/lab17/03-kv-namespace.png) + +### 3.3 Secrets (Task 4) + +```text +$ curl -s https://edge-api.aezuraa.workers.dev/whoami | jq . +{ + "app": "edge-api", + "admin_email": "eg***@gmail.com", + "api_token": "****************************cbc6", + "note": "Both ADMIN_EMAIL and API_TOKEN are Wrangler secrets ..." +} +``` + +`api_token` is shown with only the last 4 characters (`cbc6`) — the rest +of the 32-char hex token is masked. `admin_email` is shown as the first 2 +characters of the local part plus the domain (`eg***@gmail.com`). The raw +values never leave the runtime; the dashboard also only shows them as +`(encrypted)`. + +The `wrangler deployments list` output confirms a `Source: Secret Change` +revision was created automatically when the secret was updated — Cloudflare +treats secret rotation as a deployment. 
+ +![Secrets page in dashboard (values hidden)](screenshots/lab17/04-secrets.png) + +### 3.4 Logs (Task 5) + +```bash +npx wrangler tail +``` + +Sample structured log lines from a 7-request burst captured during this lab: + +```text +$ npx wrangler tail --format=pretty + ⛅️ wrangler 4.90.0 +─────────────────── +Successfully created tail, expires at 2026-05-10T20:38:05Z +Connected to edge-api, waiting for logs... + +GET https://edge-api.aezuraa.workers.dev/ - Ok @ 5/10/2026, 5:38:50 PM + (log) {"level":"info","event":"request_start","method":"GET","path":"/","colo":"ARN","country":"FI","ts":"2026-05-10T14:38:50.064Z"} + (log) {"level":"info","event":"request_end","path":"/","status":200,"duration_ms":0} + +GET https://edge-api.aezuraa.workers.dev/counter - Ok @ 5/10/2026, 5:38:50 PM + (log) {"level":"info","event":"request_start","method":"GET","path":"/counter","colo":"ARN","country":"FI","ts":"2026-05-10T14:38:50.604Z"} + (log) {"level":"info","event":"counter_inc","previous":4,"next":5} + (log) {"level":"info","event":"request_end","path":"/counter","status":200,"duration_ms":135} + +GET https://edge-api.aezuraa.workers.dev/counter - Ok @ 5/10/2026, 5:38:50 PM + (log) {"level":"info","event":"request_start","method":"GET","path":"/counter","colo":"ARN","country":"FI","ts":"2026-05-10T14:38:50.929Z"} + (log) {"level":"info","event":"counter_inc","previous":5,"next":6} + (log) {"level":"info","event":"request_end","path":"/counter","status":200,"duration_ms":88} + +GET https://edge-api.aezuraa.workers.dev/unknown - Ok @ 5/10/2026, 5:38:51 PM + (log) {"level":"info","event":"request_start","method":"GET","path":"/unknown","colo":"ARN","country":"FI","ts":"2026-05-10T14:38:51.388Z"} + (log) {"level":"info","event":"request_end","path":"/unknown","status":404,"duration_ms":0} +``` + +Every line is a JSON object emitted by `console.log()` in the Worker. Two +events per request (`request_start` / `request_end`) plus a `counter_inc` +event when KV is mutated. 
`duration_ms: 0` for read-only paths because the +Worker handler completes faster than the millisecond resolution of +`Date.now()`; `/counter` shows the real KV write latency (88–135 ms). + +![`wrangler tail` showing live request log](screenshots/lab17/05-wrangler-tail.png) + +![Workers Logs in the dashboard](screenshots/lab17/06-dashboard-logs.png) + +### 3.5 Metrics (Task 5) + +The Workers dashboard "Metrics" tab shows requests per second, success rate, +median CPU time, and median wall time over the last 15 minutes. + +![Workers Metrics tab](screenshots/lab17/07-dashboard-metrics.png) + +### 3.6 Deployments & rollback (Task 5) + +```text +$ npx wrangler deployments list +Created: 2026-05-10T14:33:41.749Z +Author: egor2910ag@gmail.com +Source: Unknown (deployment) +Version(s): (100%) c69c40e6-edbf-4e3e-80e7-d0abbda4324b ← v1: initial deploy + +Created: 2026-05-10T14:36:23.525Z +Author: egor2910ag@gmail.com +Source: Secret Change +Version(s): (100%) 28b50813-0a33-4dae-b36a-985527d3585f ← ADMIN_EMAIL rotation + +Created: 2026-05-10T14:37:15.431Z +Author: egor2910ag@gmail.com +Source: Unknown (deployment) +Version(s): (100%) e2e865e2-3a86-47cf-a926-5a7a8e142fe7 ← v2: lazy START seed, + "v2" message, + version="1.0.1" +``` + +Three deployments are visible: the initial code deploy, an automatic +deployment triggered by `wrangler secret put ADMIN_EMAIL`, and the second +code deploy. Each gets a unique Version ID that survives forever — an arbitrary +prior version can be re-activated with `wrangler rollback `. + +![Deployments history in dashboard](screenshots/lab17/08-deployments-history.png) + +Rollback is performed via: + +```bash +npx wrangler rollback +``` + +After rollback `/health` and `/counter` continued to respond — the rolled-back +version reads the same KV namespace, so the visit count continues from where +it left off (KV is not versioned with the code). + +--- + +## 4. 
Kubernetes vs Cloudflare Workers + +| Aspect | Kubernetes (labs 09–16) | Cloudflare Workers (lab 17) | +|---|---|---| +| **Setup complexity** | High — install `kubectl`, `helm`, `minikube`, configure StorageClass, CRDs, charts | Low — `npm create cloudflare@latest`, `wrangler login`, deploy in 60 sec | +| **Deployment speed** | Container build + push + helm upgrade → 1–3 min on a cold path | `wrangler deploy` → 5–15 sec end-to-end | +| **Global distribution** | Manual: replicas in 1 region; multi-region needs federation, ingress, DNS work | Automatic: every `wrangler deploy` puts the code on every Cloudflare colo (~330 cities) — no region selection | +| **Cost (small apps)** | Cluster running 24/7 even with 0 traffic — node hours dominate | Pay-per-request: 100k req/day on free tier, then ~$0.30/M req | +| **State / persistence** | Full toolbox: PVC, StatefulSet, ConfigMap, Secret, external DBs, Vault | Edge-native: KV (eventual consistency, ~60 ms reads), Durable Objects (strong), R2 (S3-like), D1 (SQLite). 
No long-lived in-memory state per pod | + | **Runtime** | Any container image, any language, full POSIX, threads, sockets | V8 isolate (or Python via Pyodide), no native binaries, no listening sockets, 30 s CPU/128 MB RAM/request | + | **Control / flexibility** | High — choose runtime, scheduler, networking, sidecars, init containers, custom CRDs | Low — only HTTP / Cron / Queue triggers, no shell, no custom networking | + | **Observability** | Prometheus + Grafana + Loki stack (lab 16) — pull-based, you operate it | Built-in Workers Logs + Metrics — no infra, but limited query language | + | **Best use case** | Long-running stateful services, multi-process workloads, anything needing GPU/native deps | Globally distributed HTTP APIs, light edge logic, request rewriting, low-latency lookup | + +### 4.1 When to use each + +**Workers** when: + +- API is mostly HTTP, request handlers are short and stateless; +- traffic is global and you want low-latency everywhere; +- the team is small or there's no platform engineer to run a cluster; +- workload fits within the V8 limits (no native deps, no long CPU bursts); +- cost matters at low/medium QPS — running a cluster for a free-tier app is overkill. + +**Kubernetes** when: + +- you need a specific runtime (JVM, .NET, GPU workloads, full POSIX); +- pods are long-lived, hold significant in-memory state, or run sidecars; +- there are stateful workloads (databases, queues, search indexes) that you operate yourself; +- multi-tenant cluster usage justifies the operational overhead; +- you need rich, centralised observability (Prometheus + Loki + custom alerts). + +### 4.2 Recommendation + +For the `devops-info-python` style service used throughout labs 09–16 +(Flask app exposing `/`, `/health`, `/visits`, `/metrics`), **Workers is a +better fit** — the app is stateless, HTTP-only, traffic is light, and the +operational savings (no cluster, no scrape config, no helm upgrades) dwarf +the loss of flexibility. 
Kubernetes makes sense once you have multiple +services with shared state, sidecars, or non-HTTP protocols. + +--- + +## 5. Reflection + +### 5.1 What felt easier than Kubernetes + +- **Time to first deploy**: ~3 minutes from `npm create` to a public HTTPS URL, + vs. ~30 minutes to scaffold a chart, lint it, build an image, helm install, + open an Ingress, and verify probes. +- **No certificate / DNS work**: `workers.dev` gives a working HTTPS URL for + free — in K8s I had to wire `tls.crt`/`tls.key` and an `Ingress` (lab 09). +- **Zero-config logs and metrics**: enable `observability` in `wrangler.jsonc` + and the dashboard fills up. In K8s the same baseline took an entire lab + (16) to install kube-prometheus-stack, ServiceMonitor, named-port plumbing, + and dashboard exploration. +- **Secrets ergonomics**: `wrangler secret put NAME` vs. K8s `Secret` ↔ Vault + CSI/agent injection (labs 11/12) — no operator, no annotation soup. + +### 5.2 What felt more constrained + +- **No native binaries**, no shelling out — the entire app must fit V8. + Tools like `wget` (used in our lab 16 init container) simply can't run. +- **No long-lived in-memory state** between requests; the closest analogue + to a process-wide cache is Durable Objects, which is a separate primitive. +- **Limited runtime APIs**: most Node.js modules require explicit + `nodejs_compat` flag, and even then are restricted (e.g. `fs` is mostly + no-op). +- **One trigger model**: HTTP / Cron / Queue. Anything custom (TCP listener, + WebSocket server with arbitrary protocol) needs Durable Objects + WebSocket + hibernation, which is a different mental model. +- **Vendor lock-in by design**: `request.cf`, KV, Durable Objects, D1 are + all Cloudflare-specific. Migrating off Workers means a rewrite, whereas a + K8s deployment is portable across providers. + +### 5.3 What changed because Workers is not a Docker host + +- The Lab 2 Docker image was *not used at all* — Workers does not run + containers. 
The Python+Flask app from labs 09–16 was effectively + *replaced* with a TypeScript Worker exposing the same shape of API, not + *moved* to a new platform. +- Build pipeline collapses: no `docker build`, no registry, no image tag. + `wrangler deploy` bundles TypeScript directly with esbuild. +- Probes don't exist — there is no kubelet asking the Worker if it's alive, + no readiness gating. `/health` exists in the API for clients, not for the + platform. +- "Local development" is fundamentally different — `wrangler dev` + boots a v8 isolate locally and proxies edge metadata via Cloudflare's + dev edge (so `request.cf` is populated even locally, just from the + developer's network path, not the eventual user's). + +--- + +## 6. CLI Cheatsheet + +| Command | Purpose | +|---|---| +| `npm create cloudflare@latest -- edge-api` | Scaffold a new Worker project (C3) | +| `npx wrangler login` | Authenticate the CLI via browser OAuth | +| `npx wrangler whoami` | Show the active account / email | +| `npx wrangler dev` | Run the Worker locally on `http://localhost:8787` | +| `npx wrangler deploy` | Build + upload + activate the Worker globally | +| `npx wrangler kv namespace create ` | Provision a KV namespace, returns its id | +| `npx wrangler secret put ` | Set a secret from stdin | +| `npx wrangler tail` | Live-stream `console.log` from the deployed Worker | +| `npx wrangler deployments list` | Show deployment history with version ids | +| `npx wrangler rollback []` | Roll back to a previous version | +| `npm run cf-typegen` | Regenerate TypeScript types from `wrangler.jsonc` bindings | + +--- + +## 7. 
Troubleshooting (collected during this lab) + +| Symptom | Cause | Fix | +|---|---|---| +| `npx wrangler login` hangs in the browser | Cloudflare partially blocked on the network (RU ISPs, restrictive corporate proxies) | Switch to a full-tunnel VPN before running `wrangler login` | +| `wrangler deploy` fails with `Authentication error` | OAuth token expired (after months) | Re-run `npx wrangler login` | +| `KV id ` validation error on first dev / deploy | Forgot to replace placeholder after `kv namespace create` | Paste the returned id into `wrangler.jsonc` `kv_namespaces[0].id` | +| `request.cf` returns nulls in `wrangler dev` | Older wrangler versions don't proxy edge metadata | Update wrangler (`npm i -D wrangler@latest`) or test against the deployed URL | +| Secret used in code but `` at runtime | Set on the wrong environment, or not set at all | `npx wrangler secret list` to inspect; `secret put` again if missing | +| Worker bundle exceeds 1 MB / 10 MB | Heavy `node_modules` pulled in transitively | Trim deps; Workers paid plans raise the limit but it usually means the code shouldn't be a Worker | diff --git a/ansible/.ansible-lint b/ansible/.ansible-lint new file mode 100644 index 0000000000..022cafb067 --- /dev/null +++ b/ansible/.ansible-lint @@ -0,0 +1,3 @@ +--- +skip_list: + - var-naming[no-role-prefix] diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000000..54b3683b8c --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,12 @@ +[defaults] +inventory = inventory/hosts.ini +roles_path = roles +host_key_checking = False +remote_user = ubuntu +retry_files_enabled = False +vault_password_file = .vault_pass + +[privilege_escalation] +become = True +become_method = sudo +become_user = root diff --git a/ansible/docs/LAB05.md b/ansible/docs/LAB05.md new file mode 100644 index 0000000000..5297388d66 --- /dev/null +++ b/ansible/docs/LAB05.md @@ -0,0 +1,331 @@ +# Lab 05 — Ansible Fundamentals + +## 1. 
Architecture Overview + +**Ansible version:** 2.20.2 (ansible-core), Ansible package 13.3.0 +**Target VM OS:** Ubuntu 24.04 LTS (Yandex Cloud, provisioned via Pulumi in Lab 4) +**Control node:** macOS (local machine) + +### Role Structure + +``` +ansible/ +├── inventory/ +│ ├── hosts.ini # Static inventory with VM IP +│ └── group_vars/ +│ └── all.yml # Encrypted variables (Vault) +├── roles/ +│ ├── common/ # System packages & timezone +│ │ ├── tasks/main.yml +│ │ └── defaults/main.yml +│ ├── docker/ # Docker CE installation +│ │ ├── tasks/main.yml +│ │ ├── handlers/main.yml +│ │ └── defaults/main.yml +│ └── app_deploy/ # Container-based app deployment +│ ├── tasks/main.yml +│ ├── handlers/main.yml +│ └── defaults/main.yml +├── playbooks/ +│ ├── site.yml # Full setup (provision + deploy) +│ ├── provision.yml # System provisioning only +│ └── deploy.yml # App deployment only +├── ansible.cfg # Ansible configuration +├── .vault_pass # Vault password (gitignored) +└── docs/ + └── LAB05.md # This file +``` + +### Why Roles Instead of Monolithic Playbooks? + +Roles provide modular, reusable units of automation. Each role encapsulates a single responsibility — `common` handles base packages, `docker` handles Docker installation, and `app_deploy` handles the application lifecycle. This separation makes it easy to reuse the Docker role in other projects, test roles independently, and maintain clear ownership of each piece of configuration. + +--- + +## 2. Roles Documentation + +### 2.1 Common Role + +**Purpose:** Installs essential system packages and sets the timezone on all managed hosts. + +**Variables (defaults):** +| Variable | Default | Description | +|----------|---------|-------------| +| `common_packages` | `[python3-pip, curl, git, vim, htop, wget, unzip, ca-certificates, gnupg, lsb-release]` | Packages to install | +| `common_timezone` | `Europe/Moscow` | System timezone | + +**Handlers:** None. +**Dependencies:** None. 
+ +### 2.2 Docker Role + +**Purpose:** Installs Docker CE from the official Docker repository, ensures the service is running and enabled, and adds the target user to the `docker` group. + +**Variables (defaults):** +| Variable | Default | Description | +|----------|---------|-------------| +| `docker_user` | `ubuntu` | User to add to docker group | +| `docker_packages` | `[docker-ce, docker-ce-cli, containerd.io, docker-buildx-plugin, docker-compose-plugin]` | Docker packages | + +**Handlers:** +- `Restart docker` — restarts the Docker daemon when configuration changes (triggered by package install). + +**Dependencies:** Requires `common` role to be applied first (for `ca-certificates`, `gnupg`, `curl`). + +### 2.3 App Deploy Role + +**Purpose:** Pulls a Docker image from Docker Hub, removes any existing container, runs the new version, and verifies it is healthy. + +**Variables (defaults):** +| Variable | Default | Description | +|----------|---------|-------------| +| `app_name` | `devops-app` | Application name | +| `app_port` | `8080` | Port to expose | +| `app_container_name` | `{{ app_name }}` | Container name | +| `app_restart_policy` | `unless-stopped` | Docker restart policy | +| `app_env_vars` | `{HOST: 0.0.0.0, PORT: 8080, DEBUG: False}` | Environment variables | + +**Vault variables (inventory/group_vars/all.yml):** +| Variable | Description | +|----------|-------------| +| `dockerhub_username` | Docker Hub username | +| `dockerhub_password` | Docker Hub access token | +| `docker_image` | Image name (`aezuraa/devops-info-service`) | +| `docker_image_tag` | Image tag (`python`) | + +**Handlers:** +- `Restart app container` — restarts the application container. + +**Dependencies:** Requires `docker` role (Docker must be installed and running). + +--- + +## 3. 
Idempotency Demonstration + +### First Run — `ansible-playbook playbooks/provision.yml` + +``` +PLAY [Provision web servers] *************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [common : Update apt cache] *********************************************** +changed: [lab04-vm] + +TASK [common : Install common packages] **************************************** +changed: [lab04-vm] + +TASK [common : Set timezone] *************************************************** +changed: [lab04-vm] + +TASK [docker : Install prerequisites for Docker repository] ******************** +ok: [lab04-vm] + +TASK [docker : Create keyrings directory] ************************************** +ok: [lab04-vm] + +TASK [docker : Add Docker GPG key] ********************************************* +changed: [lab04-vm] + +TASK [docker : Add Docker repository] ****************************************** +changed: [lab04-vm] + +TASK [docker : Install Docker packages] **************************************** +changed: [lab04-vm] + +TASK [docker : Ensure Docker service is running and enabled] ******************* +ok: [lab04-vm] + +TASK [docker : Add user to docker group] *************************************** +changed: [lab04-vm] + +TASK [docker : Install python3-docker for Ansible docker modules] ************** +changed: [lab04-vm] + +RUNNING HANDLER [docker : Restart docker] ************************************** +changed: [lab04-vm] + +PLAY RECAP ********************************************************************* +lab04-vm : ok=13 changed=9 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` + +**Analysis:** 9 out of 13 tasks show `changed` — the system was in a fresh state, so packages were installed, Docker was set up from scratch, and the handler fired because Docker packages were installed. 
+ +### Second Run — `ansible-playbook playbooks/provision.yml` + +``` +PLAY [Provision web servers] *************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [common : Update apt cache] *********************************************** +ok: [lab04-vm] + +TASK [common : Install common packages] **************************************** +ok: [lab04-vm] + +TASK [common : Set timezone] *************************************************** +ok: [lab04-vm] + +TASK [docker : Install prerequisites for Docker repository] ******************** +ok: [lab04-vm] + +TASK [docker : Create keyrings directory] ************************************** +ok: [lab04-vm] + +TASK [docker : Add Docker GPG key] ********************************************* +ok: [lab04-vm] + +TASK [docker : Add Docker repository] ****************************************** +ok: [lab04-vm] + +TASK [docker : Install Docker packages] **************************************** +ok: [lab04-vm] + +TASK [docker : Ensure Docker service is running and enabled] ******************* +ok: [lab04-vm] + +TASK [docker : Add user to docker group] *************************************** +ok: [lab04-vm] + +TASK [docker : Install python3-docker for Ansible docker modules] ************** +ok: [lab04-vm] + +PLAY RECAP ********************************************************************* +lab04-vm : ok=12 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` + +**Analysis:** Every task shows `ok` — zero changes. The handler did NOT fire because no task notified it. This proves idempotency: the system is already in the desired state, so Ansible makes no modifications. 
+ +**What makes the roles idempotent:** +- `apt` module with `state: present` — only installs if package is missing +- `apt_key` with `state: present` — only adds the key if it doesn't exist +- `apt_repository` with `state: present` — only adds if not already configured +- `service` with `state: started` — no-op if already running +- `user` with `groups: docker, append: yes` — no-op if user is already in the group +- `cache_valid_time: 3600` — skips apt update if cache is fresh + +--- + +## 4. Ansible Vault Usage + +### How Credentials Are Stored + +All sensitive data (Docker Hub username/password, app configuration) is stored in `ansible/inventory/group_vars/all.yml`, encrypted with Ansible Vault. + +### Vault Password Management + +The vault password is stored in `ansible/.vault_pass` (a plain text file with the password). This file is: +- Listed in `.gitignore` — never committed to Git +- Referenced in `ansible.cfg` via `vault_password_file = .vault_pass` +- Permissions set to `600` (owner-only read/write) + +### Encrypted File Example + +The file `inventory/group_vars/all.yml` looks like this after encryption: + +``` +$ANSIBLE_VAULT;1.1;AES256 +66386530356432313261653635333338316539633935613031633638653464386337613334 +61613837613930306265316637653637663162363833383234363633626566303033616365 +... +``` + +This can be safely committed to Git — the content is AES-256 encrypted and cannot be read without the vault password. + +### Why Ansible Vault Is Important + +Without Vault, credentials would be stored in plaintext YAML files. Even in a private repository, this creates risk: credentials in Git history are permanent, team members may have excessive access, and accidental pushes to public repos would leak secrets. Vault ensures secrets are encrypted at rest and only decrypted during playbook execution. + +--- + +## 5. 
Deployment Verification + +### Deploy Playbook Run — `ansible-playbook playbooks/deploy.yml` + +``` +PLAY [Deploy application] ****************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [app_deploy : Log in to Docker Hub] *************************************** +ok: [lab04-vm] + +TASK [app_deploy : Pull Docker image] ****************************************** +ok: [lab04-vm] + +TASK [app_deploy : Stop and remove existing container] ************************* +ok: [lab04-vm] + +TASK [app_deploy : Run application container] ********************************** +changed: [lab04-vm] + +TASK [app_deploy : Wait for application to be ready] *************************** +ok: [lab04-vm] + +TASK [app_deploy : Verify health endpoint] ************************************* +ok: [lab04-vm] + +TASK [app_deploy : Display health check result] ******************************** +ok: [lab04-vm] => { + "health_check.json": { + "status": "healthy", + "timestamp": "2026-02-23T14:53:11.642430+00:00", + "uptime_seconds": 9 + } +} + +PLAY RECAP ********************************************************************* +lab04-vm : ok=8 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` + +### Container Status — `docker ps` + +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +8999db58c415 aezuraa/devops-info-service:python "python app.py" 45 seconds ago Up 22 seconds 0.0.0.0:8080->8080/tcp devops-app +``` + +### Health Check Verification + +```bash +$ curl http://84.201.130.19:8080/health +{"status":"healthy","timestamp":"2026-02-23T14:54:49.625553+00:00","uptime_seconds":84} +``` + +### Handler Execution + +The `Restart app container` handler fired during the first deployment because the `Run application container` task was `changed`. On subsequent runs with no container changes, it would not fire. + +--- + +## 6. 
Key Decisions + +**Why use roles instead of plain playbooks?** +Roles enforce a standard directory structure that separates concerns — tasks, handlers, defaults, and files each live in their own location. This makes it trivial to reuse the `docker` role in any project that needs Docker, without copying and pasting task blocks between playbooks. + +**How do roles improve reusability?** +Each role is self-contained with its own variables, handlers, and tasks. The `docker` role can be dropped into any Ansible project to install Docker on Ubuntu. The `app_deploy` role can be parameterized for any Docker-based application — just override the image name, port, and credentials. + +**What makes a task idempotent?** +A task is idempotent when it checks the current state before acting. Ansible modules like `apt`, `service`, and `user` are inherently idempotent — they compare the desired state (`state: present`, `state: started`) against reality and only make changes when there's a difference. Using `command` or `shell` modules breaks idempotency unless you add `creates`/`removes` guards. + +**How do handlers improve efficiency?** +Handlers only run when notified by a changed task, and they run only once at the end of the play regardless of how many tasks notify them. This prevents unnecessary service restarts — Docker is only restarted when its packages are actually installed or updated, not on every playbook run. + +**Why is Ansible Vault necessary?** +Vault encrypts sensitive data (passwords, API tokens) so they can be stored alongside code in version control. Without Vault, you'd need to manage secrets outside of Git (environment variables, external secret managers), which complicates reproducibility. Vault strikes a balance between security and simplicity for team-sized projects. + +--- + +## 7. Challenges + +- **Yandex Cloud completion file:** The `yandex-cloud/completion.zsh.inc` file (~9MB) was freezing terminal startup. Disabled it in `.zshrc`. 
+- **Port mapping:** The lab template suggests port 5000, but our app from Labs 1-3 runs on port 8080. Updated all port references and added a security group rule for port 8080. +- **Docker image tag:** CI/CD pipeline from Lab 3 publishes the image as `aezuraa/devops-info-service:python`, not `:latest`. Configured the correct tag in vault variables. +- **VM recreation:** The Yandex Cloud VM from Lab 4 was shut down. Recreated it using the existing Pulumi configuration (`pulumi up`) before running Ansible. +- **group_vars path:** Ansible 2.20 requires `group_vars/` to be inside the inventory directory (adjacent to `hosts.ini`), not at the project root. Moved accordingly. +- **Deprecation warning:** `ansible_distribution_release` auto-injected fact is deprecated in 2.20+. Replaced with `ansible_facts['distribution_release']`. diff --git a/ansible/docs/LAB06.md b/ansible/docs/LAB06.md new file mode 100644 index 0000000000..704eb744ca --- /dev/null +++ b/ansible/docs/LAB06.md @@ -0,0 +1,1291 @@ +# Lab 6: Advanced Ansible & CI/CD + +[![Ansible Deployment (Python)](https://github.com/AEZuraa/DevOps-Core-Course/actions/workflows/ansible-deploy.yml/badge.svg)](https://github.com/AEZuraa/DevOps-Core-Course/actions/workflows/ansible-deploy.yml) +[![Ansible Deployment (Go Bonus)](https://github.com/AEZuraa/DevOps-Core-Course/actions/workflows/ansible-deploy-bonus.yml/badge.svg)](https://github.com/AEZuraa/DevOps-Core-Course/actions/workflows/ansible-deploy-bonus.yml) + +--- + +## Task 1: Blocks & Tags (2 pts) + +### Implementation + +All three roles (`common`, `docker`, `web_app`) were refactored with Ansible blocks, rescue/always sections, and a comprehensive tag strategy. 
+ +### Common Role (`roles/common/tasks/main.yml`) + +Two logical blocks with error handling: + +**Block 1 — Package Installation** (tag: `packages`): + +```yaml +- name: Install system packages + block: + - name: Update apt cache + - name: Install common packages + rescue: + - name: Fix apt cache and retry (apt-get update --fix-missing) + - name: Retry package installation + always: + - name: Log package installation completion (/tmp/ansible_common_packages.log) + become: true + tags: [packages, common] +``` + +**Block 2 — User & System Configuration** (tag: `users`): + +```yaml +- name: Configure users and system + block: + - name: Set timezone + - name: Ensure deploy group exists + - name: Create deploy user + always: + - name: Log user configuration completion (/tmp/ansible_common_users.log) + become: true + tags: [users, common] +``` + +### Docker Role (`roles/docker/tasks/main.yml`) + +**Block 1 — Docker Installation** (tag: `docker_install`): + +```yaml +- name: Install Docker + block: + - name: Install prerequisites + - name: Create keyrings directory + - name: Add Docker GPG key + - name: Add Docker repository + - name: Install Docker packages + rescue: + - name: Wait 10 seconds before retry + - name: Retry apt update after GPG key failure + - name: Retry Docker installation + always: + - name: Ensure Docker service is enabled and started + become: true + tags: [docker_install, docker] +``` + +**Block 2 — Docker Configuration** (tag: `docker_config`): + +```yaml +- name: Configure Docker + block: + - name: Add user to docker group + - name: Install python3-docker for Ansible docker modules + become: true + tags: [docker_config, docker] +``` + +### Tag Strategy + +| Tag | Scope | Purpose | +|-----|-------|---------| +| `common` | Entire common role | Run all common tasks | +| `packages` | Package installation block | Only install packages | +| `users` | User management block | Only configure users | +| `docker` | Entire docker role | Run all docker tasks | 
+| `docker_install` | Docker installation block | Only install Docker | +| `docker_config` | Docker configuration block | Only configure Docker | +| `web_app` | Entire web_app role | Run all app tasks | +| `app_deploy` | Deployment block | Only deploy application | +| `compose` | Deployment block | Alias for compose deployment | +| `web_app_wipe` | Wipe tasks | Only wipe application | + +### Execution Examples + +```bash +# Run only Docker installation +ansible-playbook playbooks/provision.yml --tags "docker_install" + +# Skip common role entirely +ansible-playbook playbooks/provision.yml --skip-tags "common" + +# Install packages only +ansible-playbook playbooks/provision.yml --tags "packages" + +# List all available tags +ansible-playbook playbooks/site.yml --list-tags +``` + +### Evidence: --list-tags + +``` +$ ansible-playbook playbooks/site.yml --list-tags + +playbook: playbooks/site.yml + + play #1 (webservers): Full infrastructure setup and deployment TAGS: [] + TASK TAGS: [app_deploy, common, compose, docker, docker_config, docker_install, packages, users, web_app, web_app_wipe] +``` + +### Evidence: selective execution + +```bash +ansible-playbook playbooks/provision.yml --tags "docker_install" +``` + +``` +PLAY [Provision web servers] *************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [docker : Install prerequisites for Docker repository] ******************** +ok: [lab04-vm] + +TASK [docker : Create keyrings directory] ************************************** +ok: [lab04-vm] + +TASK [docker : Add Docker GPG key] ********************************************* +ok: [lab04-vm] + +TASK [docker : Add Docker repository] ****************************************** +ok: [lab04-vm] + +TASK [docker : Install Docker packages] **************************************** +ok: [lab04-vm] + +TASK [docker : Ensure Docker service is enabled and started] 
******************* +ok: [lab04-vm] + +PLAY RECAP ********************************************************************* +lab04-vm : ok=7 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` + +```bash +ansible-playbook playbooks/provision.yml --tags "packages" +``` + +``` +PLAY [Provision web servers] *************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [common : Update apt cache] *********************************************** +ok: [lab04-vm] + +TASK [common : Install common packages] **************************************** +ok: [lab04-vm] + +TASK [common : Log package installation completion] **************************** +changed: [lab04-vm] + +PLAY RECAP ********************************************************************* +lab04-vm : ok=4 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` + +```bash +ansible-playbook playbooks/provision.yml --skip-tags "common" +``` + +``` +PLAY [Provision web servers] *************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [docker : Install prerequisites for Docker repository] ******************** +ok: [lab04-vm] + +TASK [docker : Create keyrings directory] ************************************** +ok: [lab04-vm] + +TASK [docker : Add Docker GPG key] ********************************************* +ok: [lab04-vm] + +TASK [docker : Add Docker repository] ****************************************** +ok: [lab04-vm] + +TASK [docker : Install Docker packages] **************************************** +ok: [lab04-vm] + +TASK [docker : Ensure Docker service is enabled and started] ******************* +ok: [lab04-vm] + +TASK [docker : Add user to docker group] *************************************** +ok: [lab04-vm] + +TASK [docker : Install python3-docker for Ansible docker modules] ************** 
+ok: [lab04-vm] + +PLAY RECAP ********************************************************************* +lab04-vm : ok=9 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` + +### Evidence: rescue block triggered + +The rescue block in the `web_app` role was triggered when `docker compose up` failed due to a container name conflict (old container from previous lab still present): + +```bash +ansible-playbook playbooks/deploy.yml +``` + +``` +TASK [web_app : Deploy with docker compose] ************************************ +fatal: [lab04-vm]: FAILED! => {"changed": false, "cmd": ["docker", "compose", "up", "-d", +"--remove-orphans"], "rc": 1, "stderr": " Container devops-app Error response from daemon: +Conflict. The container name \"/devops-app\" is already in use by container \"8999db58c415...\" +You have to remove (or rename) that container to be able to reuse that name."} + +TASK [web_app : Handle deployment failure] ************************************* +ok: [lab04-vm] => { + "msg": "Deployment of devops-app failed. Check logs for details." +} + +TASK [web_app : Show docker compose logs] ************************************** +changed: [lab04-vm] + +TASK [web_app : Display compose logs] ****************************************** +ok: [lab04-vm] => { + "compose_logs.stdout_lines": [] +} + +PLAY RECAP ********************************************************************* +lab04-vm : ok=17 changed=1 unreachable=0 failed=0 skipped=5 rescued=1 ignored=1 +``` + +The `rescued=1` counter confirms the rescue block handled the failure gracefully instead of aborting the play. + +### Research Answers + +**Q: What happens if rescue block also fails?** +A: The play fails and execution stops for the host. The always block still runs regardless. The rescue block failure is treated as a normal task failure. + +**Q: Can you have nested blocks?** +A: Yes, blocks can be nested, and each block — inner or outer — can carry its own rescue/always sections; an inner rescue handles a failure first, and only failures it does not handle propagate to the enclosing block's rescue. 
Nested blocks are useful for grouping tasks with shared directives. + +**Q: How do tags inherit to tasks within blocks?** +A: Tags applied at block level are automatically inherited by all tasks inside the block (including rescue/always). Tasks inside blocks can also have their own additional tags. + +--- + +## Task 2: Docker Compose (3 pts) + +### Role Rename + +Renamed `app_deploy` → `web_app` for better semantics and multi-app readiness: + +```bash +cd ansible/roles && mv app_deploy web_app +``` + +Updated all playbook references (`provision.yml`, `deploy.yml`, `site.yml`). + +### Docker Compose Template + +**File:** `roles/web_app/templates/docker-compose.yml.j2` + +```yaml +--- +services: + {{ app_name }}: + image: {{ docker_image }}:{{ docker_tag }} + container_name: {{ app_name }} + ports: + - "{{ app_port }}:{{ app_internal_port }}" + environment: + HOST: "0.0.0.0" + PORT: "{{ app_internal_port }}" + restart: {{ app_restart_policy }} + networks: + - app_network + +networks: + app_network: + driver: bridge +``` + +All values are Jinja2-templated and configurable via role defaults or external variables. + +### Role Dependencies + +**File:** `roles/web_app/meta/main.yml` + +```yaml +dependencies: + - role: docker +``` + +Running only the `web_app` role automatically triggers Docker installation first. 
+ +### Before / After Comparison + +| Aspect | Before (app_deploy) | After (web_app) | +|--------|---------------------|-----------------| +| Deployment method | `docker run` via `docker_container` module | Docker Compose via template | +| Configuration | Inline in tasks | Templated `docker-compose.yml.j2` | +| Error handling | None | Block/rescue/always | +| Dependencies | Manual (must run docker role first) | Automatic via `meta/main.yml` | +| Multi-app support | No | Yes (same role, different variables) | +| Wipe logic | Not implemented | Variable + tag double-gating | +| Tags | None | `app_deploy`, `compose`, `web_app_wipe` | + +### Variables + +**Default values** (`roles/web_app/defaults/main.yml`): + +| Variable | Default | Purpose | +|----------|---------|---------| +| `app_name` | `devops-app` | Service and container name | +| `docker_image` | `aezuraa/devops-info-service` | Docker Hub image | +| `docker_tag` | `python` | Image version tag | +| `app_port` | `8000` | Host-exposed port | +| `app_internal_port` | `8080` | Container internal port | +| `app_restart_policy` | `unless-stopped` | Container restart policy | +| `compose_project_dir` | `/opt/{{ app_name }}` | Deploy directory on host | +| `web_app_wipe` | `false` | Wipe safety variable | + +### Evidence: first run + +```bash +ansible-playbook playbooks/deploy.yml +``` + +``` +PLAY [Deploy application] ****************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [docker : Install prerequisites for Docker repository] ******************** +ok: [lab04-vm] + +TASK [docker : Create keyrings directory] ************************************** +ok: [lab04-vm] + +TASK [docker : Add Docker GPG key] ********************************************* +ok: [lab04-vm] + +TASK [docker : Add Docker repository] ****************************************** +ok: [lab04-vm] + +TASK [docker : Install Docker packages] 
**************************************** +ok: [lab04-vm] + +TASK [docker : Ensure Docker service is enabled and started] ******************* +ok: [lab04-vm] + +TASK [docker : Add user to docker group] *************************************** +ok: [lab04-vm] + +TASK [docker : Install python3-docker for Ansible docker modules] ************** +ok: [lab04-vm] + +TASK [web_app : Include wipe tasks] ******************************************** +included: roles/web_app/tasks/wipe.yml for lab04-vm + +TASK [web_app : Stop and remove containers via docker compose] ***************** +skipping: [lab04-vm] + +TASK [web_app : Remove docker-compose file] ************************************ +skipping: [lab04-vm] + +TASK [web_app : Remove application directory] ********************************** +skipping: [lab04-vm] + +TASK [web_app : Remove Docker image] ******************************************* +skipping: [lab04-vm] + +TASK [web_app : Log wipe completion] ******************************************* +skipping: [lab04-vm] + +TASK [web_app : Create app directory] ****************************************** +changed: [lab04-vm] + +TASK [web_app : Template docker-compose file] ********************************** +changed: [lab04-vm] + +TASK [web_app : Log in to Docker Hub] ****************************************** +fatal: [lab04-vm]: FAILED! => {... 
"no_log: true" ...} +...ignoring + +TASK [web_app : Pull latest image] ********************************************* +ok: [lab04-vm] + +TASK [web_app : Deploy with docker compose] ************************************ +changed: [lab04-vm] + +TASK [web_app : Wait for application to be ready] ****************************** +ok: [lab04-vm] + +TASK [web_app : Verify health endpoint] **************************************** +ok: [lab04-vm] + +TASK [web_app : Display health check result] *********************************** +ok: [lab04-vm] => { + "health_check.json": { + "status": "healthy", + "timestamp": "2026-03-04T16:54:41.845057+00:00", + "uptime_seconds": 9 + } +} + +PLAY RECAP ********************************************************************* +lab04-vm : ok=18 changed=3 unreachable=0 failed=0 skipped=5 rescued=0 ignored=1 +``` + +First run: `changed=3` — directory created, compose file templated, containers started. + +### Evidence: second run (idempotent) + +```bash +ansible-playbook playbooks/deploy.yml +``` + +``` +PLAY [Deploy application] ****************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [docker : Install prerequisites for Docker repository] ******************** +ok: [lab04-vm] + +TASK [docker : Create keyrings directory] ************************************** +ok: [lab04-vm] + +TASK [docker : Add Docker GPG key] ********************************************* +ok: [lab04-vm] + +TASK [docker : Add Docker repository] ****************************************** +ok: [lab04-vm] + +TASK [docker : Install Docker packages] **************************************** +ok: [lab04-vm] + +TASK [docker : Ensure Docker service is enabled and started] ******************* +ok: [lab04-vm] + +TASK [docker : Add user to docker group] *************************************** +ok: [lab04-vm] + +TASK [docker : Install python3-docker for Ansible docker modules] 
************** +ok: [lab04-vm] + +TASK [web_app : Include wipe tasks] ******************************************** +included: roles/web_app/tasks/wipe.yml for lab04-vm + +TASK [web_app : Stop and remove containers via docker compose] ***************** +skipping: [lab04-vm] + +TASK [web_app : Remove docker-compose file] ************************************ +skipping: [lab04-vm] + +TASK [web_app : Remove application directory] ********************************** +skipping: [lab04-vm] + +TASK [web_app : Remove Docker image] ******************************************* +skipping: [lab04-vm] + +TASK [web_app : Log wipe completion] ******************************************* +skipping: [lab04-vm] + +TASK [web_app : Create app directory] ****************************************** +ok: [lab04-vm] + +TASK [web_app : Template docker-compose file] ********************************** +ok: [lab04-vm] + +TASK [web_app : Log in to Docker Hub] ****************************************** +fatal: [lab04-vm]: FAILED! => {... "no_log: true" ...} +...ignoring + +TASK [web_app : Pull latest image] ********************************************* +ok: [lab04-vm] + +TASK [web_app : Deploy with docker compose] ************************************ +ok: [lab04-vm] + +TASK [web_app : Wait for application to be ready] ****************************** +ok: [lab04-vm] + +TASK [web_app : Verify health endpoint] **************************************** +ok: [lab04-vm] + +TASK [web_app : Display health check result] *********************************** +ok: [lab04-vm] => { + "health_check.json": { + "status": "healthy", + "timestamp": "2026-03-04T16:56:13.832102+00:00", + "uptime_seconds": 101 + } +} + +PLAY RECAP ********************************************************************* +lab04-vm : ok=18 changed=0 unreachable=0 failed=0 skipped=5 rescued=0 ignored=1 +``` + +Second run: `changed=0` — full idempotency confirmed. All tasks report `ok`, nothing changed. 
+ +### Evidence: VM verification + +``` +$ docker ps +CONTAINER ID IMAGE STATUS PORTS NAMES +57a4e11ab0c8 aezuraa/devops-info-service:python Up 3 minutes 0.0.0.0:8080->8080/tcp, [::]:8080->8080/tcp devops-app + +$ curl -s http://localhost:8080/ | python3 -m json.tool +{ + "service": { + "description": "DevOps course info service", + "framework": "Flask", + "name": "devops-info-service", + "version": "1.0.0" + }, + "system": { + "architecture": "x86_64", + "cpu_count": 2, + "hostname": "57a4e11ab0c8", + "platform": "Linux", + "python_version": "3.12.12" + }, + "runtime": { + "current_time": "2026-03-04T16:57:43.008174+00:00", + "uptime_human": "3 minutes", + "uptime_seconds": 190 + }, + "endpoints": ["/", "/health"] +} + +$ curl -s http://localhost:8080/health | python3 -m json.tool +{ + "status": "healthy", + "timestamp": "2026-03-04T16:57:43.062120+00:00", + "uptime_seconds": 190 +} +``` + +### Research Answers + +**Q: What's the difference between `restart: always` and `restart: unless-stopped`?** +A: `always` restarts even after a `docker stop` command on daemon restart. `unless-stopped` does NOT restart if the container was explicitly stopped before the daemon restart. `unless-stopped` is safer for production — it respects manual stop actions. + +**Q: How do Docker Compose networks differ from Docker bridge networks?** +A: Compose creates a dedicated bridge network per project with built-in DNS resolution (services can reference each other by name). Default Docker bridge networks don't provide automatic DNS and use legacy `--link` for inter-container communication. + +**Q: Can you reference Ansible Vault variables in the template?** +A: Yes. Vault-encrypted variables are decrypted at playbook runtime and can be referenced in Jinja2 templates like any other variable (e.g., `{{ app_secret_key }}`). 
+ +--- + +## Task 3: Wipe Logic (1 pt) + +### Implementation + +Wipe logic uses **double-gating** — both a variable (`web_app_wipe: true`) and a tag (`--tags web_app_wipe`) must be active for wipe tasks to run. + +**File:** `roles/web_app/tasks/wipe.yml` + +```yaml +- name: Wipe web application + block: + - name: Stop and remove containers via docker compose + - name: Remove docker-compose file + - name: Remove application directory + - name: Remove Docker image + - name: Log wipe completion + when: web_app_wipe | bool + tags: [web_app_wipe] +``` + +**Included at the top of** `roles/web_app/tasks/main.yml` (before deploy block): + +```yaml +- name: Include wipe tasks + include_tasks: wipe.yml + tags: [web_app_wipe] +``` + +### Test Scenarios + +**Scenario 1: Normal deployment (wipe does NOT run)** + +```bash +ansible-playbook playbooks/deploy.yml +``` + +``` +TASK [web_app : Include wipe tasks] ******************************************** +included: roles/web_app/tasks/wipe.yml for lab04-vm + +TASK [web_app : Stop and remove containers via docker compose] ***************** +skipping: [lab04-vm] + +TASK [web_app : Remove docker-compose file] ************************************ +skipping: [lab04-vm] + +TASK [web_app : Remove application directory] ********************************** +skipping: [lab04-vm] + +TASK [web_app : Remove Docker image] ******************************************* +skipping: [lab04-vm] + +TASK [web_app : Log wipe completion] ******************************************* +skipping: [lab04-vm] + +... + +PLAY RECAP ********************************************************************* +lab04-vm : ok=18 changed=3 unreachable=0 failed=0 skipped=5 rescued=0 ignored=1 +``` + +All wipe tasks **skipped** because `web_app_wipe` defaults to `false`. Deployment proceeds normally. 
+ +**Scenario 2: Wipe only** + +```bash +ansible-playbook playbooks/deploy.yml \ + -e "web_app_wipe=true" \ + --tags web_app_wipe +``` + +``` +PLAY [Deploy application] ****************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [web_app : Include wipe tasks] ******************************************** +included: roles/web_app/tasks/wipe.yml for lab04-vm + +TASK [web_app : Stop and remove containers via docker compose] ***************** +changed: [lab04-vm] + +TASK [web_app : Remove docker-compose file] ************************************ +changed: [lab04-vm] + +TASK [web_app : Remove application directory] ********************************** +changed: [lab04-vm] + +TASK [web_app : Remove Docker image] ******************************************* +changed: [lab04-vm] + +TASK [web_app : Log wipe completion] ******************************************* +ok: [lab04-vm] => { + "msg": "Application devops-app wiped successfully from /opt/devops-app" +} + +PLAY RECAP ********************************************************************* +lab04-vm : ok=7 changed=4 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` + +All wipe tasks **executed** (`changed=4`). Deployment tasks were NOT run because `--tags web_app_wipe` excluded them. + +**VM verification after wipe:** + +``` +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + +$ ls /opt +containerd +``` + +No containers running, no application directory — wipe successful. + +**Scenario 3: Clean reinstallation (wipe → deploy)** + +```bash +ansible-playbook playbooks/deploy.yml \ + -e "web_app_wipe=true" +``` + +``` +PLAY [Deploy application] ****************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +...docker role tasks (ok)... 
+ +TASK [web_app : Include wipe tasks] ******************************************** +included: roles/web_app/tasks/wipe.yml for lab04-vm + +TASK [web_app : Stop and remove containers via docker compose] ***************** +fatal: [lab04-vm]: FAILED! => {"msg": "Unable to change directory before execution."} +...ignoring + +TASK [web_app : Remove docker-compose file] ************************************ +ok: [lab04-vm] + +TASK [web_app : Remove application directory] ********************************** +ok: [lab04-vm] + +TASK [web_app : Remove Docker image] ******************************************* +fatal: [lab04-vm]: FAILED! => {"stderr": "No such image: aezuraa/devops-info-service:python"} +...ignoring + +TASK [web_app : Log wipe completion] ******************************************* +ok: [lab04-vm] => { + "msg": "Application devops-app wiped successfully from /opt/devops-app" +} + +TASK [web_app : Create app directory] ****************************************** +changed: [lab04-vm] + +TASK [web_app : Template docker-compose file] ********************************** +changed: [lab04-vm] + +... + +TASK [web_app : Deploy with docker compose] ************************************ +changed: [lab04-vm] + +TASK [web_app : Display health check result] *********************************** +ok: [lab04-vm] => { + "health_check.json": { + "status": "healthy", + "timestamp": "2026-03-04T17:00:49.660406+00:00", + "uptime_seconds": 13 + } +} + +PLAY RECAP ********************************************************************* +lab04-vm : ok=23 changed=4 unreachable=0 failed=0 skipped=0 rescued=0 ignored=3 +``` + +Wipe ran first (cleanup errors ignored gracefully for already-clean state), then deployment completed successfully with healthy app. 
+ +**Scenario 4: Safety check — tag without variable** + +```bash +ansible-playbook playbooks/deploy.yml --tags web_app_wipe +``` + +``` +PLAY [Deploy application] ****************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [web_app : Include wipe tasks] ******************************************** +included: roles/web_app/tasks/wipe.yml for lab04-vm + +TASK [web_app : Stop and remove containers via docker compose] ***************** +skipping: [lab04-vm] + +TASK [web_app : Remove docker-compose file] ************************************ +skipping: [lab04-vm] + +TASK [web_app : Remove application directory] ********************************** +skipping: [lab04-vm] + +TASK [web_app : Remove Docker image] ******************************************* +skipping: [lab04-vm] + +TASK [web_app : Log wipe completion] ******************************************* +skipping: [lab04-vm] + +PLAY RECAP ********************************************************************* +lab04-vm : ok=2 changed=0 unreachable=0 failed=0 skipped=5 rescued=0 ignored=0 +``` + +Wipe tasks were **included** (tag matched) but **skipped** by `when: web_app_wipe | bool` because the variable defaults to `false`. Double-gating works as intended. + +### Research Answers + +**1. Why use both variable AND tag?** +Double safety: the variable prevents accidental wipe if tags are misconfigured, and the tag prevents wipe from running during normal deployments. Neither alone is sufficient — together they ensure wipe only happens when explicitly intended. + +**2. What's the difference between `never` tag and this approach?** +The `never` tag makes tasks never run unless explicitly included with `--tags never`. This approach is more flexible: the variable allows runtime decisions (`-e "web_app_wipe=true"`) while the tag prevents execution during normal playbook runs. 
The `never` tag approach also makes the "clean reinstall" scenario (wipe + deploy in one run) awkward: the tags would have to be listed explicitly (e.g. `--tags all,web_app_wipe`), whereas the variable enables it with a single `-e "web_app_wipe=true"` flag. + +**3. Why must wipe logic come BEFORE deployment in main.yml?** +For the clean reinstall scenario: old application must be removed before deploying the new one. This ensures a fresh state before the new deployment begins. + +**4. When would you want clean reinstallation vs. rolling update?** +Clean reinstall is best when: changing major versions, debugging persistent state issues, switching application architecture, or fixing corrupted installations. Rolling updates are better for: zero-downtime deployments, minor version bumps, and frequent configuration changes. + +**5. How would you extend this to wipe Docker images and volumes too?** +Add tasks for `docker rmi` (already included), `docker volume prune`, and `docker network prune` to the wipe block. Use `--volumes` flag with `docker compose down` to remove named volumes. + +--- + +## Task 4: CI/CD (3 pts) + +### Workflow Architecture + +Two GitHub Actions workflows for independent deployment: + +``` +Code Push → Path Filter → Ansible Lint → Deploy via Ansible → Verify (curl) +``` + +### Workflow 1: Python App (`.github/workflows/ansible-deploy.yml`) + +- **Trigger:** Push to `master`/`lab06` with changes in `ansible/**` +- **Job 1 — Lint:** Installs `ansible-lint`, validates playbooks and role files +- **Job 2 — Deploy:** Sets up SSH, decrypts vault, runs `deploy_python.yml` +- **Verify step (inside Deploy job):** Curls `http://localhost:8000/health` on the VM over SSH + +### Workflow 2: Go Bonus App (`.github/workflows/ansible-deploy-bonus.yml`) + +- **Trigger:** Push to `master`/`lab06` with changes in bonus-specific paths +- Same job structure, deploys `deploy_bonus.yml` and verifies on port `8001` + +### Path Filters + +| File Changed | Python Workflow | Bonus Workflow | +|-------------|-----------------|----------------| +| `ansible/vars/app_python.yml` | Runs | Skipped | +| `ansible/vars/app_bonus.yml` | Skipped | Runs | +| 
`ansible/roles/web_app/**` | Runs | Runs | +| `ansible/playbooks/deploy_bonus.yml` | Skipped | Runs | + +### Required GitHub Secrets + +| Secret | Purpose | +|--------|---------| +| `ANSIBLE_VAULT_PASSWORD` | Decrypt vault-encrypted variables | +| `SSH_PRIVATE_KEY` | SSH access to target VM | +| `VM_HOST` | Target VM IP address | +| `VM_USER` | SSH username | + +### Status Badges + +Added to `ansible/docs/LAB06.md` (this report). + +### Evidence: successful workflow runs + +Both workflows triggered by push to `lab06` branch, commit `edba9c6` (`fix(lab06): verify deployment via SSH instead of external curl`): + +**Ansible Deployment (Python)** — [Run #3](https://github.com/AEZuraa/DevOps-Core-Course/actions/runs/22681670266) + +| Job | Duration | Status | +|-----|----------|--------| +| Ansible Lint | 44s | Passed | +| Deploy Python Application | 1m 59s | Passed | + +**Ansible Deployment (Go Bonus)** — [Run #3](https://github.com/AEZuraa/DevOps-Core-Course/actions/runs/22681670287) + +| Job | Duration | Status | +|-----|----------|--------| +| Ansible Lint | 39s | Passed | +| Deploy Go Bonus Application | 2m 3s | Passed | + +### Evidence: ansible-lint and deploy logs + +**Lint step** (both workflows): + +``` +Passed: 0 failure(s), 0 warning(s) on production profile. +``` + +**Deploy step** (Python, key output): + +``` +TASK [web_app : Display health check result] *********************************** +ok: [lab04-vm] => { + "web_app_health_check.json": { + "status": "healthy", + "uptime_seconds": 1658 + } +} + +PLAY RECAP ********************************************************************* +lab04-vm : ok=18 changed=0 unreachable=0 failed=0 skipped=5 rescued=0 ignored=0 +``` + +**Verify step** (Python): `curl -sf http://localhost:8000/health` via SSH — success. + +**Verify step** (Go): `curl -sf http://localhost:8001/health` via SSH — success. + +### Research Answers + +**1. 
Security implications of storing SSH keys in GitHub Secrets?** +Secrets are encrypted at rest and masked in logs, but anyone with write access to the repo can create workflows that read them. Best practices: use deploy keys with minimal permissions, rotate regularly, prefer self-hosted runners for direct access. + +**2. How to implement staging → production pipeline?** +Use separate inventory files (`inventory/staging.ini`, `inventory/production.ini`) and GitHub environments with required approvals. The staging job runs first; production requires manual approval after staging verification. + +**3. What would you add to make rollbacks possible?** +Pin Docker image tags to specific versions (not `latest`). Store the previous tag in a file or variable. Create a rollback playbook that deploys the previous version. Use CalVer tags (`2026.02.27`) for traceability. + +**4. How does self-hosted runner improve security?** +Self-hosted runners eliminate the need to expose SSH keys to GitHub infrastructure. The runner runs inside the trusted network with direct access to targets. No secrets leave the private environment. + +--- + +## Task 5: Documentation + +This file (`ansible/docs/LAB06.md`) serves as the complete documentation for Lab 6. 
+ +--- + +## Bonus Part 1: Multi-App Deployment (1.5 pts) + +### Architecture + +The `web_app` role is reused for both Python and Go applications with different variable files: + +``` +ansible/ +├── vars/ +│ ├── app_python.yml # Python app: port 8000 +│ └── app_bonus.yml # Go app: port 8001 +├── playbooks/ +│ ├── deploy_python.yml # Deploy Python only +│ ├── deploy_bonus.yml # Deploy Go only +│ └── deploy_all.yml # Deploy both apps +└── roles/ + └── web_app/ # Single role for all apps +``` + +### Variable Files + +**Python App** (`vars/app_python.yml`): + +| Variable | Value | +|----------|-------| +| `app_name` | `devops-python` | +| `docker_image` | `aezuraa/devops-info-service` | +| `docker_tag` | `python` | +| `app_port` | `8000` | +| `app_internal_port` | `8080` | + +**Go Bonus App** (`vars/app_bonus.yml`): + +| Variable | Value | +|----------|-------| +| `app_name` | `devops-go` | +| `docker_image` | `aezuraa/devops-info-service` | +| `docker_tag` | `go` | +| `app_port` | `8001` | +| `app_internal_port` | `8080` | + +### Role Reusability + +The `web_app` role is entirely parameterized — the same code handles both apps. Each app gets its own: +- Docker Compose project directory (`/opt/devops-python`, `/opt/devops-go`) +- Container name and network +- Host port mapping (8000 vs 8001) + +### Deployment Commands + +```bash +# Deploy Python only +ansible-playbook playbooks/deploy_python.yml + +# Deploy Go only +ansible-playbook playbooks/deploy_bonus.yml + +# Deploy both apps +ansible-playbook playbooks/deploy_all.yml + +# Wipe only Python app (Go unaffected) +ansible-playbook playbooks/deploy_python.yml \ + -e "web_app_wipe=true" --tags web_app_wipe + +# Wipe only Go app +ansible-playbook playbooks/deploy_bonus.yml \ + -e "web_app_wipe=true" --tags web_app_wipe +``` + +### Port Conflict Resolution + +Apps use different host ports (8000, 8001) to run simultaneously. Both use internal port 8080, which is isolated within their respective Docker containers. 
+ +### Evidence: both apps deployed + +**Deploy Python app:** + +```bash +ansible-playbook playbooks/deploy_python.yml +``` + +``` +PLAY [Deploy Python Application] *********************************************** + +... + +TASK [web_app : Create app directory] ****************************************** +changed: [lab04-vm] + +TASK [web_app : Template docker-compose file] ********************************** +changed: [lab04-vm] + +TASK [web_app : Deploy with docker compose] ************************************ +changed: [lab04-vm] + +TASK [web_app : Display health check result] *********************************** +ok: [lab04-vm] => { + "health_check.json": { + "status": "healthy", + "timestamp": "2026-03-04T17:03:21.367388+00:00", + "uptime_seconds": 12 + } +} + +PLAY RECAP ********************************************************************* +lab04-vm : ok=18 changed=3 unreachable=0 failed=0 skipped=5 rescued=0 ignored=1 +``` + +**Deploy Go app:** + +```bash +ansible-playbook playbooks/deploy_bonus.yml +``` + +``` +PLAY [Deploy Go (Bonus) Application] ******************************************* + +... 
+ +TASK [web_app : Create app directory] ****************************************** +changed: [lab04-vm] + +TASK [web_app : Template docker-compose file] ********************************** +changed: [lab04-vm] + +TASK [web_app : Deploy with docker compose] ************************************ +changed: [lab04-vm] + +TASK [web_app : Display health check result] *********************************** +ok: [lab04-vm] => { + "health_check.json": { + "status": "healthy", + "timestamp": "2026-03-04T17:04:55.481275349Z", + "uptime_seconds": 11 + } +} + +PLAY RECAP ********************************************************************* +lab04-vm : ok=18 changed=3 unreachable=0 failed=0 skipped=5 rescued=0 ignored=1 +``` + +**Deploy both apps together:** + +```bash +ansible-playbook playbooks/deploy_all.yml +``` + +``` +PLAY [Deploy All Applications] ************************************************* + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [Deploy Python App] ******************************************************* +included: web_app for lab04-vm + +... + +TASK [web_app : Display health check result] *********************************** +ok: [lab04-vm] => { + "health_check.json": { + "status": "healthy", + "timestamp": "2026-03-04T17:11:02.150239+00:00", + "uptime_seconds": 12 + } +} + +TASK [Deploy Go (Bonus) App] *************************************************** +included: web_app for lab04-vm + +... 
+ +TASK [web_app : Display health check result] *********************************** +ok: [lab04-vm] => { + "health_check.json": { + "status": "healthy", + "timestamp": "2026-03-04T17:11:43.803111981Z", + "uptime_seconds": 153 + } +} + +PLAY RECAP ********************************************************************* +lab04-vm : ok=29 changed=1 unreachable=0 failed=0 skipped=10 rescued=0 ignored=2 +``` + +**VM verification — both apps running simultaneously:** + +``` +$ docker ps +CONTAINER ID IMAGE STATUS PORTS NAMES +d6f0d29512da aezuraa/devops-info-service:go Up 24 seconds 0.0.0.0:8001->8080/tcp, [::]:8001->8080/tcp devops-go +eed8d69ac7a1 aezuraa/devops-info-service:python Up 2 minutes 0.0.0.0:8000->8080/tcp, [::]:8000->8080/tcp devops-python + +$ curl -s http://localhost:8000/health +{"status": "healthy", "timestamp": "2026-03-04T17:05:08.591651+00:00", "uptime_seconds": 119} + +$ curl -s http://localhost:8001/health +{"status": "healthy", "timestamp": "2026-03-04T17:05:08.697947633Z", "uptime_seconds": 24} + +$ curl -s http://localhost:8000/ | python3 -m json.tool +{ + "service": { + "name": "devops-info-service", + "framework": "Flask", + ... + } +} + +$ curl -s http://localhost:8001/ | python3 -m json.tool +{ + "service": { + "name": "devops-info-service", + "framework": "Go net/http", + ... + } +} +``` + +Both apps are live: Python on `:8000` (Flask), Go on `:8001` (Go net/http). 
+ +### Evidence: independent wipe + +**Wipe only Python (Go should survive):** + +```bash +ansible-playbook playbooks/deploy_python.yml \ + -e "web_app_wipe=true" --tags web_app_wipe +``` + +``` +PLAY [Deploy Python Application] *********************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [lab04-vm] + +TASK [web_app : Include wipe tasks] ******************************************** +included: roles/web_app/tasks/wipe.yml for lab04-vm + +TASK [web_app : Stop and remove containers via docker compose] ***************** +changed: [lab04-vm] + +TASK [web_app : Remove docker-compose file] ************************************ +changed: [lab04-vm] + +TASK [web_app : Remove application directory] ********************************** +changed: [lab04-vm] + +TASK [web_app : Remove Docker image] ******************************************* +changed: [lab04-vm] + +TASK [web_app : Log wipe completion] ******************************************* +ok: [lab04-vm] => { + "msg": "Application devops-python wiped successfully from /opt/devops-python" +} + +PLAY RECAP ********************************************************************* +lab04-vm : ok=7 changed=4 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` + +**VM verification after Python wipe:** + +``` +$ docker ps +CONTAINER ID IMAGE STATUS PORTS NAMES +d6f0d29512da aezuraa/devops-info-service:go Up About a minute 0.0.0.0:8001->8080/tcp, [::]:8001->8080/tcp devops-go + +$ curl -s http://localhost:8001/health +{"status": "healthy", "timestamp": "2026-03-04T17:05:59.954314906Z", "uptime_seconds": 76} + +$ curl -s --max-time 3 http://localhost:8000/ +Connection refused — Python app is gone + +$ ls /opt +containerd +devops-go +``` + +Python app wiped, Go app **unaffected** — independent lifecycle confirmed. 
+ +--- + +## Bonus Part 2: Multi-App CI/CD (1 pt) + +### Implementation + +Two separate GitHub Actions workflows (Approach A) for independent deployments: + +1. **`ansible-deploy.yml`** — Triggered by Python-specific and shared Ansible file changes +2. **`ansible-deploy-bonus.yml`** — Triggered by Go-specific and shared Ansible file changes + +### Path Filter Strategy + +Changes to shared files (`roles/web_app/**`) trigger both workflows. Changes to app-specific files trigger only the relevant workflow. Documentation changes are excluded from all triggers. + +### Testing Scenarios + +| Change | Python Workflow | Bonus Workflow | +|--------|:---:|:---:| +| `vars/app_python.yml` | Runs | - | +| `vars/app_bonus.yml` | - | Runs | +| `roles/web_app/**` | Runs | Runs | +| `ansible/docs/**` | - | - | + +### Evidence: both workflows triggered by shared role change + +Commit `edba9c6` modified `ansible/roles/web_app/**` (shared code) — both workflows triggered simultaneously: + +| Workflow | Run | Status | Duration | +|----------|-----|--------|----------| +| [Ansible Deployment (Python)](https://github.com/AEZuraa/DevOps-Core-Course/actions/runs/22681670266) | #3 | Passed | 2m 55s | +| [Ansible Deployment (Go Bonus)](https://github.com/AEZuraa/DevOps-Core-Course/actions/runs/22681670287) | #3 | Passed | 2m 55s | + +This confirms that path filters work correctly: changing a shared role triggers both independent workflows. + +--- + +## Summary + +### What was accomplished + +1. **Blocks & Tags** — All roles refactored with logical grouping, error recovery, and selective execution +2. **Docker Compose** — Migration from imperative `docker run` to declarative Compose templates +3. **Wipe Logic** — Safe cleanup with double-gating (variable + tag) +4. **CI/CD** — Automated lint → deploy → verify pipeline via GitHub Actions +5. **Multi-App** — Role reuse for Python and Go apps with independent lifecycle +6. 
**Multi-App CI/CD** — Independent deployment triggers via path filters + +### Key Learnings + +- Ansible blocks significantly improve error handling and code organization +- Docker Compose templates with Jinja2 enable flexible, multi-app deployments +- Double-gating (variable + tag) is essential for destructive operations +- Path filters in CI/CD prevent unnecessary deployments and reduce costs + +### Final Directory Structure + +``` +ansible/ +├── ansible.cfg +├── .vault_pass +├── docs/ +│ ├── LAB05.md +│ └── LAB06.md +├── inventory/ +│ ├── hosts.ini +│ └── group_vars/ +│ └── all.yml (encrypted) +├── vars/ +│ ├── app_python.yml +│ └── app_bonus.yml +├── playbooks/ +│ ├── site.yml +│ ├── provision.yml +│ ├── deploy.yml +│ ├── deploy_python.yml +│ ├── deploy_bonus.yml +│ └── deploy_all.yml +└── roles/ + ├── common/ + │ ├── defaults/main.yml + │ └── tasks/main.yml (blocks + tags + rescue) + ├── docker/ + │ ├── defaults/main.yml + │ ├── handlers/main.yml + │ └── tasks/main.yml (blocks + tags + rescue) + └── web_app/ + ├── defaults/main.yml + ├── handlers/main.yml + ├── meta/main.yml (docker dependency) + ├── templates/docker-compose.yml.j2 + └── tasks/ + ├── main.yml (deploy with blocks) + └── wipe.yml (double-gated cleanup) +``` diff --git a/ansible/inventory/group_vars/all.yml b/ansible/inventory/group_vars/all.yml new file mode 100644 index 0000000000..75470b6ecd --- /dev/null +++ b/ansible/inventory/group_vars/all.yml @@ -0,0 +1,22 @@ +$ANSIBLE_VAULT;1.1;AES256 +34376630653530303733623066616665663434346262663031343463663666353337316263393132 +3430643935616264616334663064623132373233313564640a656637393532333339653533393464 +30316437366531376164636462353135633563303438623530383663366363386230326633653865 +3264373063626438380a313435373830626361333139616233343936353263356264626230313237 +30643466373838343932343134393836306237626336356230333930386337656332326334323930 +37353936363362663365336232656231633861353066643236613465626230343865333238356337 
+33333064613137303964613539623363313233343733636232366633333432636434333366333263 +31363463643535646164393835656661643233313235633865326561336630663031633337376435 +30306133313434613962346133613763656565326162303066653162366638353231653364363564 +31633564373030383066313437633563316536393233383930306664666262363061306665373462 +39363537626436626433623136323364666432343838613934336132656233333362623032636561 +63353661386662633734336430323164343037666666396263636639393961313739653737363238 +64363561643337616561643666326231326633303630366662333538666666616239343264316334 +33326133333463346134613738626561313037633331633639643231346664646134343463303161 +35303436373734646233316537353463306632663538333432623066643865393737653032383965 +31383830636565663738333734396238343135376532663331353538373862386632643564366362 +39343634663035303233373661386231306237373637323234353538313137386237343465376166 +63373331366633613337346638616136306232323265616565613830626231353431636437333339 +38323866343663636332666261666166356433643166633962646562303762646361316135323463 +61656364646464333430346662663536626632633835646266306332333930363161313238346366 +38343037646262363037623033393539613136663433303362303337393731656665 diff --git a/ansible/inventory/hosts.ini b/ansible/inventory/hosts.ini new file mode 100644 index 0000000000..0f0b2cd79d --- /dev/null +++ b/ansible/inventory/hosts.ini @@ -0,0 +1,5 @@ +[webservers] +lab04-vm ansible_host=84.201.130.19 ansible_user=ubuntu + +[webservers:vars] +ansible_python_interpreter=/usr/bin/python3 diff --git a/ansible/playbooks/deploy-monitoring.yml b/ansible/playbooks/deploy-monitoring.yml new file mode 100644 index 0000000000..4230781db5 --- /dev/null +++ b/ansible/playbooks/deploy-monitoring.yml @@ -0,0 +1,8 @@ +--- +- name: Deploy Monitoring Stack (Loki + Promtail + Grafana) + hosts: webservers + become: true + + roles: + - role: monitoring + tags: [monitoring] diff --git a/ansible/playbooks/deploy.yml b/ansible/playbooks/deploy.yml new file 
mode 100644 index 0000000000..f9801cc439 --- /dev/null +++ b/ansible/playbooks/deploy.yml @@ -0,0 +1,8 @@ +--- +- name: Deploy application + hosts: webservers + become: true + + roles: + - role: web_app + tags: [web_app] diff --git a/ansible/playbooks/deploy_all.yml b/ansible/playbooks/deploy_all.yml new file mode 100644 index 0000000000..c0b2ecc285 --- /dev/null +++ b/ansible/playbooks/deploy_all.yml @@ -0,0 +1,27 @@ +--- +- name: Deploy All Applications + hosts: webservers + become: true + + tasks: + - name: Deploy Python App + ansible.builtin.include_role: + name: web_app + vars: + app_name: devops-python + docker_image: aezuraa/devops-info-service + docker_tag: python + app_port: 8000 + app_internal_port: 8080 + compose_project_dir: "/opt/devops-python" + + - name: Deploy Go (Bonus) App + ansible.builtin.include_role: + name: web_app + vars: + app_name: devops-go + docker_image: aezuraa/devops-info-service + docker_tag: go + app_port: 8001 + app_internal_port: 8080 + compose_project_dir: "/opt/devops-go" diff --git a/ansible/playbooks/deploy_bonus.yml b/ansible/playbooks/deploy_bonus.yml new file mode 100644 index 0000000000..d6657e8f18 --- /dev/null +++ b/ansible/playbooks/deploy_bonus.yml @@ -0,0 +1,10 @@ +--- +- name: Deploy Go (Bonus) Application + hosts: webservers + become: true + vars_files: + - ../vars/app_bonus.yml + + roles: + - role: web_app + tags: [web_app] diff --git a/ansible/playbooks/deploy_python.yml b/ansible/playbooks/deploy_python.yml new file mode 100644 index 0000000000..cb247496fe --- /dev/null +++ b/ansible/playbooks/deploy_python.yml @@ -0,0 +1,10 @@ +--- +- name: Deploy Python Application + hosts: webservers + become: true + vars_files: + - ../vars/app_python.yml + + roles: + - role: web_app + tags: [web_app] diff --git a/ansible/playbooks/provision.yml b/ansible/playbooks/provision.yml new file mode 100644 index 0000000000..362e19a8b2 --- /dev/null +++ b/ansible/playbooks/provision.yml @@ -0,0 +1,10 @@ +--- +- name: Provision web 
servers + hosts: webservers + become: true + + roles: + - role: common + tags: [common] + - role: docker + tags: [docker] diff --git a/ansible/playbooks/site.yml b/ansible/playbooks/site.yml new file mode 100644 index 0000000000..7ce6010196 --- /dev/null +++ b/ansible/playbooks/site.yml @@ -0,0 +1,12 @@ +--- +- name: Full infrastructure setup and deployment + hosts: webservers + become: true + + roles: + - role: common + tags: [common] + - role: docker + tags: [docker] + - role: web_app + tags: [web_app] diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml new file mode 100644 index 0000000000..ebe44ec5bf --- /dev/null +++ b/ansible/roles/common/defaults/main.yml @@ -0,0 +1,14 @@ +--- +common_packages: + - python3-pip + - curl + - git + - vim + - htop + - wget + - unzip + - ca-certificates + - gnupg + - lsb-release + +common_timezone: "Europe/Moscow" diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml new file mode 100644 index 0000000000..bc61da57d7 --- /dev/null +++ b/ansible/roles/common/tasks/main.yml @@ -0,0 +1,65 @@ +--- +- name: Install system packages + become: true + tags: + - packages + - common + block: + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + + - name: Install common packages + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + + rescue: + - name: Fix apt cache and retry + ansible.builtin.apt: + update_cache: true + force_apt_get: true + changed_when: true + + - name: Retry package installation + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + + always: + - name: Log package installation completion + ansible.builtin.copy: + content: "Package installation completed at {{ ansible_date_time.iso8601 }}" + dest: /tmp/ansible_common_packages.log + mode: "0644" + +- name: Configure users and system + become: true + tags: + - users + - common + block: + - name: Set timezone + 
community.general.timezone: + name: "{{ common_timezone }}" + + - name: Ensure deploy group exists + ansible.builtin.group: + name: deploy + state: present + + - name: Create deploy user + ansible.builtin.user: + name: "{{ deploy_user | default('deploy') }}" + group: deploy + shell: /bin/bash + create_home: true + state: present + + always: + - name: Log user configuration completion + ansible.builtin.copy: + content: "User configuration completed at {{ ansible_date_time.iso8601 }}" + dest: /tmp/ansible_common_users.log + mode: "0644" diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000000..8daf52eaae --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,9 @@ +--- +docker_user: ubuntu + +docker_packages: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 0000000000..07aa0eb290 --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Restart docker + ansible.builtin.service: + name: docker + state: restarted diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 0000000000..07a9713ae7 --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,85 @@ +--- +- name: Install Docker + become: true + tags: + - docker_install + - docker + block: + - name: Install prerequisites for Docker repository + ansible.builtin.apt: + name: + - ca-certificates + - curl + - gnupg + state: present + + - name: Create keyrings directory + ansible.builtin.file: + path: /etc/apt/keyrings + state: directory + mode: "0755" + + - name: Add Docker GPG key + ansible.builtin.apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + keyring: /etc/apt/keyrings/docker.gpg + state: present + + - name: Add Docker repository + 
ansible.builtin.apt_repository: + repo: >- + deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] + https://download.docker.com/linux/ubuntu + {{ ansible_facts['distribution_release'] }} stable + state: present + filename: docker + + - name: Install Docker packages + ansible.builtin.apt: + name: "{{ docker_packages }}" + state: present + update_cache: true + notify: Restart docker + + rescue: + - name: Wait before retry + ansible.builtin.pause: + seconds: 10 + + - name: Retry apt update after GPG key failure + ansible.builtin.apt: + update_cache: true + force_apt_get: true + changed_when: true + + - name: Retry Docker installation + ansible.builtin.apt: + name: "{{ docker_packages }}" + state: present + update_cache: true + notify: Restart docker + + always: + - name: Ensure Docker service is enabled and started + ansible.builtin.service: + name: docker + state: started + enabled: true + failed_when: false + +- name: Configure Docker + become: true + tags: + - docker_config + - docker + block: + - name: Add user to docker group + ansible.builtin.user: + name: "{{ docker_user }}" + groups: docker + append: true + + - name: Install python3-docker for Ansible docker modules + ansible.builtin.apt: + name: python3-docker + state: present diff --git a/ansible/roles/monitoring/defaults/main.yml b/ansible/roles/monitoring/defaults/main.yml new file mode 100644 index 0000000000..0146e19adb --- /dev/null +++ b/ansible/roles/monitoring/defaults/main.yml @@ -0,0 +1,55 @@ +--- +# Service versions +loki_version: "3.0.0" +promtail_version: "3.0.0" +grafana_version: "12.3.1" +prometheus_version: "3.9.0" + +# Ports +loki_port: 3100 +promtail_port: 9080 +grafana_port: 3000 +prometheus_port: 9090 + +# Retention +loki_retention_period: "168h" +loki_schema_version: "v13" +prometheus_retention_days: 15 +prometheus_retention_size: "10GB" +prometheus_scrape_interval: "15s" + +# Prometheus scrape targets +prometheus_targets: + - job: "prometheus" + targets: ["localhost:9090"] + - job: 
"app" + targets: ["app-python:8080"] + path: "/metrics" + - job: "loki" + targets: ["loki:3100"] + path: "/metrics" + - job: "grafana" + targets: ["grafana:3000"] + path: "/metrics" + +# Resource limits +loki_memory_limit: "1G" +loki_cpu_limit: "1.0" +promtail_memory_limit: "512M" +promtail_cpu_limit: "0.5" +grafana_memory_limit: "512M" +grafana_cpu_limit: "0.5" +prometheus_memory_limit: "1G" +prometheus_cpu_limit: "1.0" +app_memory_limit: "256M" +app_cpu_limit: "0.5" + +# Grafana credentials +# grafana_admin_user and grafana_admin_password must be provided +# via Ansible Vault (inventory/group_vars/all.yml) or extra vars. +grafana_admin_user: "admin" +grafana_admin_password: "{{ vault_grafana_admin_password }}" + +# Deployment +monitoring_compose_dir: "/opt/monitoring" +monitoring_network: "logging" diff --git a/ansible/roles/monitoring/files/grafana-app-dashboard.json b/ansible/roles/monitoring/files/grafana-app-dashboard.json new file mode 100644 index 0000000000..6ace8e1836 --- /dev/null +++ b/ansible/roles/monitoring/files/grafana-app-dashboard.json @@ -0,0 +1,319 @@ +{ + "annotations": { + "list": [] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": null, + "links": [], + "panels": [ + { + "title": "Service Uptime", + "type": "stat", + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 0 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "mappings": [ + { + "options": { + "0": { + "color": "red", + "text": "DOWN" + }, + "1": { + "color": "green", + "text": "UP" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "options": { + "colorMode": "background", + "graphMode": "none", + "textMode": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ] + } + }, + "targets": [ + { + "expr": "up{job=\"app\"}", + "legendFormat": "app-python", + "refId": 
"A" + } + ] + }, + { + "title": "Request Rate by Endpoint", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 12, + "x": 6, + "y": 0 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "reqps", + "custom": { + "lineWidth": 2, + "fillOpacity": 15, + "showPoints": "never" + } + }, + "overrides": [] + }, + "targets": [ + { + "expr": "sum(rate(http_requests_total[5m])) by (endpoint)", + "legendFormat": "{{endpoint}}", + "refId": "A" + } + ] + }, + { + "title": "Error Rate (5xx)", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 0 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "reqps", + "color": { + "mode": "fixed", + "fixedColor": "red" + }, + "custom": { + "lineWidth": 2, + "fillOpacity": 30, + "showPoints": "never" + } + }, + "overrides": [] + }, + "targets": [ + { + "expr": "sum(rate(http_requests_total{status=~\"5..\"}[5m]))", + "legendFormat": "5xx errors/s", + "refId": "A" + } + ] + }, + { + "title": "Request Duration p95", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "s", + "custom": { + "lineWidth": 2, + "fillOpacity": 10, + "showPoints": "never" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 0.5 + }, + { + "color": "red", + "value": 1.0 + } + ] + } + }, + "overrides": [] + }, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(http_request_duration_seconds_bucket[5m])) by (le))", + "legendFormat": "p95 latency", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(http_request_duration_seconds_bucket[5m])) by (le))", + "legendFormat": "p50 latency", + "refId": "B" + } + ] + }, + { + "title": "Request Duration Heatmap", + "type": "heatmap", + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "datasource": "Prometheus", + 
"options": { + "calculate": false, + "yAxis": { + "unit": "s" + }, + "color": { + "scheme": "Oranges" + } + }, + "targets": [ + { + "expr": "sum(increase(http_request_duration_seconds_bucket[5m])) by (le)", + "legendFormat": "{{le}}", + "refId": "A", + "format": "heatmap" + } + ] + }, + { + "title": "Active Requests", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 16 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "short", + "color": { + "mode": "fixed", + "fixedColor": "blue" + }, + "custom": { + "lineWidth": 2, + "fillOpacity": 20, + "showPoints": "never" + } + }, + "overrides": [] + }, + "targets": [ + { + "expr": "http_requests_in_progress", + "legendFormat": "in-progress", + "refId": "A" + } + ] + }, + { + "title": "Status Code Distribution", + "type": "piechart", + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 16 + }, + "datasource": "Prometheus", + "options": { + "legend": { + "displayMode": "table", + "placement": "right" + }, + "pieType": "donut" + }, + "targets": [ + { + "expr": "sum by (status) (increase(http_requests_total[5m]))", + "legendFormat": "{{status}}", + "refId": "A" + } + ] + }, + { + "title": "Endpoint Call Count", + "type": "bargauge", + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 16 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "options": { + "orientation": "horizontal", + "displayMode": "gradient" + }, + "targets": [ + { + "expr": "sum by (endpoint) (increase(devops_info_endpoint_calls_total[1h]))", + "legendFormat": "{{endpoint}}", + "refId": "A" + } + ] + } + ], + "schemaVersion": 39, + "tags": [ + "application", + "metrics", + "RED" + ], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Application Metrics Dashboard", + "uid": "app-metrics-dashboard", + "version": 1, + "refresh": "10s" +} \ No newline 
at end of file diff --git a/ansible/roles/monitoring/meta/main.yml b/ansible/roles/monitoring/meta/main.yml new file mode 100644 index 0000000000..cb7d8e0460 --- /dev/null +++ b/ansible/roles/monitoring/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: docker diff --git a/ansible/roles/monitoring/tasks/deploy.yml b/ansible/roles/monitoring/tasks/deploy.yml new file mode 100644 index 0000000000..a7603f3426 --- /dev/null +++ b/ansible/roles/monitoring/tasks/deploy.yml @@ -0,0 +1,58 @@ +--- +- name: Pull monitoring images + become: true + ansible.builtin.command: + cmd: docker compose pull + chdir: "{{ monitoring_compose_dir }}" + register: monitoring_pull_result + changed_when: "'Pull complete' in monitoring_pull_result.stdout or 'Downloaded' in monitoring_pull_result.stderr" + +- name: Deploy monitoring stack + become: true + ansible.builtin.command: + cmd: docker compose up -d --remove-orphans + chdir: "{{ monitoring_compose_dir }}" + register: monitoring_compose_result + changed_when: > + 'Started' in monitoring_compose_result.stderr + or 'Created' in monitoring_compose_result.stderr + or monitoring_compose_template.changed + or monitoring_loki_config.changed + or monitoring_promtail_config.changed + or monitoring_prometheus_config.changed + +- name: Wait for Loki to be ready + ansible.builtin.uri: + url: "http://127.0.0.1:{{ loki_port }}/ready" + return_content: true + register: monitoring_loki_health + until: monitoring_loki_health.status == 200 + retries: 12 + delay: 5 + +- name: Wait for Prometheus to be ready + ansible.builtin.uri: + url: "http://127.0.0.1:{{ prometheus_port }}/-/healthy" + return_content: true + register: monitoring_prometheus_health + until: monitoring_prometheus_health.status == 200 + retries: 12 + delay: 5 + +- name: Wait for Grafana to be ready + ansible.builtin.uri: + url: "http://127.0.0.1:{{ grafana_port }}/api/health" + return_content: true + status_code: 200 + register: monitoring_grafana_health + until: monitoring_grafana_health.status == 200 + retries: 12 + delay: 5 + 
+- name: Display deployment status + ansible.builtin.debug: + msg: > + Monitoring stack deployed successfully. + Grafana: http://{{ ansible_host }}:{{ grafana_port }} + Prometheus: http://{{ ansible_host }}:{{ prometheus_port }} + Loki: http://{{ ansible_host }}:{{ loki_port }} diff --git a/ansible/roles/monitoring/tasks/main.yml b/ansible/roles/monitoring/tasks/main.yml new file mode 100644 index 0000000000..d56a05db70 --- /dev/null +++ b/ansible/roles/monitoring/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Set up monitoring infrastructure + ansible.builtin.include_tasks: setup.yml + tags: + - monitoring_setup + +- name: Deploy monitoring stack + ansible.builtin.include_tasks: deploy.yml + tags: + - monitoring_deploy diff --git a/ansible/roles/monitoring/tasks/setup.yml b/ansible/roles/monitoring/tasks/setup.yml new file mode 100644 index 0000000000..6c16fac291 --- /dev/null +++ b/ansible/roles/monitoring/tasks/setup.yml @@ -0,0 +1,73 @@ +--- +- name: Create monitoring directories + become: true + ansible.builtin.file: + path: "{{ item }}" + state: directory + mode: "0755" + loop: + - "{{ monitoring_compose_dir }}" + - "{{ monitoring_compose_dir }}/loki" + - "{{ monitoring_compose_dir }}/promtail" + - "{{ monitoring_compose_dir }}/prometheus" + - "{{ monitoring_compose_dir }}/grafana/provisioning/datasources" + - "{{ monitoring_compose_dir }}/grafana/provisioning/dashboards" + - "{{ monitoring_compose_dir }}/grafana/dashboards" + +- name: Template Loki configuration + become: true + ansible.builtin.template: + src: loki-config.yml.j2 + dest: "{{ monitoring_compose_dir }}/loki/config.yml" + mode: "0644" + register: monitoring_loki_config + +- name: Template Promtail configuration + become: true + ansible.builtin.template: + src: promtail-config.yml.j2 + dest: "{{ monitoring_compose_dir }}/promtail/config.yml" + mode: "0644" + register: monitoring_promtail_config + +- name: Template Prometheus configuration + become: true + ansible.builtin.template: + src: 
prometheus.yml.j2 + dest: "{{ monitoring_compose_dir }}/prometheus/prometheus.yml" + mode: "0644" + register: monitoring_prometheus_config + +- name: Template Grafana datasources provisioning + become: true + ansible.builtin.template: + src: grafana-datasources.yml.j2 + dest: "{{ monitoring_compose_dir }}/grafana/provisioning/datasources/datasources.yml" + mode: "0644" + register: monitoring_grafana_datasources + +- name: Template Grafana dashboards provisioning + become: true + ansible.builtin.template: + src: grafana-dashboards-provisioning.yml.j2 + dest: "{{ monitoring_compose_dir }}/grafana/provisioning/dashboards/dashboards.yml" + mode: "0644" + register: monitoring_grafana_dashboards_prov + +- name: Copy Grafana dashboard files + become: true + ansible.builtin.copy: + src: "{{ item }}" + dest: "{{ monitoring_compose_dir }}/grafana/dashboards/" + mode: "0644" + loop: + - grafana-app-dashboard.json + register: monitoring_grafana_dashboards + +- name: Template Docker Compose file + become: true + ansible.builtin.template: + src: docker-compose.yml.j2 + dest: "{{ monitoring_compose_dir }}/docker-compose.yml" + mode: "0644" + register: monitoring_compose_template diff --git a/ansible/roles/monitoring/templates/docker-compose.yml.j2 b/ansible/roles/monitoring/templates/docker-compose.yml.j2 new file mode 100644 index 0000000000..d67eb266f7 --- /dev/null +++ b/ansible/roles/monitoring/templates/docker-compose.yml.j2 @@ -0,0 +1,139 @@ +--- +services: + loki: + image: grafana/loki:{{ loki_version }} + container_name: loki + ports: + - "{{ loki_port }}:3100" + volumes: + - ./loki/config.yml:/etc/loki/config.yml:ro + - loki-data:/loki + command: -config.file=/etc/loki/config.yml + networks: + - {{ monitoring_network }} + labels: + logging: "promtail" + app: "loki" + healthcheck: + test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3100/ready || exit 1"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 20s + deploy: + resources: + 
limits: + cpus: '{{ loki_cpu_limit }}' + memory: {{ loki_memory_limit }} + reservations: + cpus: '0.25' + memory: 256M + restart: unless-stopped + + promtail: + image: grafana/promtail:{{ promtail_version }} + container_name: promtail + volumes: + - ./promtail/config.yml:/etc/promtail/config.yml:ro + - /var/lib/docker/containers:/var/lib/docker/containers:ro + - /var/run/docker.sock:/var/run/docker.sock:ro + command: -config.file=/etc/promtail/config.yml + networks: + - {{ monitoring_network }} + labels: + logging: "promtail" + app: "promtail" + depends_on: + loki: + condition: service_healthy + deploy: + resources: + limits: + cpus: '{{ promtail_cpu_limit }}' + memory: {{ promtail_memory_limit }} + reservations: + cpus: '0.1' + memory: 128M + restart: unless-stopped + + prometheus: + image: prom/prometheus:v{{ prometheus_version }} + container_name: prometheus + ports: + - "{{ prometheus_port }}:9090" + volumes: + - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro + - prometheus-data:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.retention.time={{ prometheus_retention_days }}d' + - '--storage.tsdb.retention.size={{ prometheus_retention_size }}' + networks: + - {{ monitoring_network }} + labels: + logging: "promtail" + app: "prometheus" + healthcheck: + test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9090/-/healthy || exit 1"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 15s + deploy: + resources: + limits: + cpus: '{{ prometheus_cpu_limit }}' + memory: {{ prometheus_memory_limit }} + reservations: + cpus: '0.25' + memory: 256M + restart: unless-stopped + + grafana: + image: grafana/grafana:{{ grafana_version }} + container_name: grafana + ports: + - "{{ grafana_port }}:3000" + volumes: + - grafana-data:/var/lib/grafana + - ./grafana/provisioning:/etc/grafana/provisioning:ro + - ./grafana/dashboards:/var/lib/grafana/dashboards:ro + environment: + - 
GF_AUTH_ANONYMOUS_ENABLED=false + - GF_SECURITY_ADMIN_USER={{ grafana_admin_user }} + - GF_SECURITY_ADMIN_PASSWORD={{ grafana_admin_password }} + - GF_SECURITY_ALLOW_EMBEDDING=true + networks: + - {{ monitoring_network }} + labels: + logging: "promtail" + app: "grafana" + depends_on: + loki: + condition: service_healthy + prometheus: + condition: service_healthy + healthcheck: + test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/api/health || exit 1"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 15s + deploy: + resources: + limits: + cpus: '{{ grafana_cpu_limit }}' + memory: {{ grafana_memory_limit }} + reservations: + cpus: '0.25' + memory: 128M + restart: unless-stopped + +volumes: + loki-data: + grafana-data: + prometheus-data: + +networks: + {{ monitoring_network }}: + driver: bridge diff --git a/ansible/roles/monitoring/templates/grafana-dashboards-provisioning.yml.j2 b/ansible/roles/monitoring/templates/grafana-dashboards-provisioning.yml.j2 new file mode 100644 index 0000000000..eadf91635c --- /dev/null +++ b/ansible/roles/monitoring/templates/grafana-dashboards-provisioning.yml.j2 @@ -0,0 +1,12 @@ +apiVersion: 1 + +providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards + foldersFromFilesStructure: false diff --git a/ansible/roles/monitoring/templates/grafana-datasources.yml.j2 b/ansible/roles/monitoring/templates/grafana-datasources.yml.j2 new file mode 100644 index 0000000000..969c655c07 --- /dev/null +++ b/ansible/roles/monitoring/templates/grafana-datasources.yml.j2 @@ -0,0 +1,15 @@ +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus:{{ prometheus_port }} + isDefault: true + editable: false + + - name: Loki + type: loki + access: proxy + url: http://loki:{{ loki_port }} + editable: false diff --git a/ansible/roles/monitoring/templates/loki-config.yml.j2 
b/ansible/roles/monitoring/templates/loki-config.yml.j2 new file mode 100644 index 0000000000..86a3bee0b8 --- /dev/null +++ b/ansible/roles/monitoring/templates/loki-config.yml.j2 @@ -0,0 +1,46 @@ +--- +auth_enabled: false + +server: + http_listen_port: 3100 + +common: + path_prefix: /loki + replication_factor: 1 + ring: + kvstore: + store: inmemory + +schema_config: + configs: + - from: "2024-01-01" + store: tsdb + object_store: filesystem + schema: {{ loki_schema_version }} + index: + prefix: index_ + period: 24h + +storage_config: + tsdb_shipper: + active_index_directory: /loki/tsdb-index + cache_location: /loki/tsdb-cache + filesystem: + directory: /loki/chunks + +limits_config: + retention_period: {{ loki_retention_period }} + reject_old_samples: true + reject_old_samples_max_age: {{ loki_retention_period }} + max_query_series: 500 + max_query_parallelism: 2 + +compactor: + working_directory: /loki/compactor + compaction_interval: 10m + retention_enabled: true + retention_delete_delay: 2h + delete_request_store: filesystem + +analytics: + reporting_enabled: false diff --git a/ansible/roles/monitoring/templates/prometheus.yml.j2 b/ansible/roles/monitoring/templates/prometheus.yml.j2 new file mode 100644 index 0000000000..645dd11b9c --- /dev/null +++ b/ansible/roles/monitoring/templates/prometheus.yml.j2 @@ -0,0 +1,13 @@ +global: + scrape_interval: {{ prometheus_scrape_interval }} + evaluation_interval: {{ prometheus_scrape_interval }} + +scrape_configs: +{% for target in prometheus_targets %} + - job_name: '{{ target.job }}' + static_configs: + - targets: {{ target.targets | to_json }} +{% if target.path is defined %} + metrics_path: '{{ target.path }}' +{% endif %} +{% endfor %} diff --git a/ansible/roles/monitoring/templates/promtail-config.yml.j2 b/ansible/roles/monitoring/templates/promtail-config.yml.j2 new file mode 100644 index 0000000000..a2c0c245a3 --- /dev/null +++ b/ansible/roles/monitoring/templates/promtail-config.yml.j2 @@ -0,0 +1,27 @@ +--- 
+server: + http_listen_port: {{ promtail_port }} + grpc_listen_port: 0 + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://loki:{{ loki_port }}/loki/api/v1/push + +scrape_configs: + - job_name: docker + docker_sd_configs: + - host: unix:///var/run/docker.sock + refresh_interval: 5s + filters: + - name: label + values: ["logging=promtail"] + relabel_configs: + - source_labels: ['__meta_docker_container_name'] + regex: '/(.*)' + target_label: 'container' + - source_labels: ['__meta_docker_container_label_app'] + target_label: 'app' + - source_labels: ['__meta_docker_container_label_app'] + target_label: 'job' diff --git a/ansible/roles/web_app/defaults/main.yml b/ansible/roles/web_app/defaults/main.yml new file mode 100644 index 0000000000..ca37d66767 --- /dev/null +++ b/ansible/roles/web_app/defaults/main.yml @@ -0,0 +1,21 @@ +--- +# Application Configuration +app_name: devops-app +docker_image: aezuraa/devops-info-service +docker_tag: python +app_port: 8000 +app_internal_port: 8080 +app_restart_policy: unless-stopped + +# Docker Compose Config +compose_project_dir: "/opt/{{ app_name }}" + +# Application Environment +app_env_vars: + HOST: "0.0.0.0" + PORT: "{{ app_internal_port }}" + +# Wipe Logic Control — requires BOTH variable=true AND --tags web_app_wipe +# Wipe only: ansible-playbook deploy.yml -e "web_app_wipe=true" --tags web_app_wipe +# Clean install: ansible-playbook deploy.yml -e "web_app_wipe=true" +web_app_wipe: false diff --git a/ansible/roles/web_app/handlers/main.yml b/ansible/roles/web_app/handlers/main.yml new file mode 100644 index 0000000000..8c147b9e9a --- /dev/null +++ b/ansible/roles/web_app/handlers/main.yml @@ -0,0 +1,7 @@ +--- +- name: Restart app container + ansible.builtin.command: + cmd: docker compose restart + chdir: "{{ compose_project_dir }}" + changed_when: true + become: true diff --git a/ansible/roles/web_app/meta/main.yml b/ansible/roles/web_app/meta/main.yml new file mode 100644 index 0000000000..cb7d8e0460 
--- /dev/null +++ b/ansible/roles/web_app/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: docker diff --git a/ansible/roles/web_app/tasks/main.yml b/ansible/roles/web_app/tasks/main.yml new file mode 100644 index 0000000000..13afc6e67e --- /dev/null +++ b/ansible/roles/web_app/tasks/main.yml @@ -0,0 +1,87 @@ +--- +- name: Include wipe tasks + ansible.builtin.include_tasks: wipe.yml + tags: + - web_app_wipe + +- name: Deploy application with Docker Compose + become: true + tags: + - app_deploy + - compose + block: + - name: Create app directory + ansible.builtin.file: + path: "{{ compose_project_dir }}" + state: directory + mode: "0755" + + - name: Template docker-compose file + ansible.builtin.template: + src: docker-compose.yml.j2 + dest: "{{ compose_project_dir }}/docker-compose.yml" + mode: "0644" + register: web_app_compose_template + + - name: Log in to Docker Hub + community.docker.docker_login: + username: "{{ dockerhub_username }}" + password: "{{ dockerhub_password }}" + no_log: true + failed_when: false + + - name: Pull latest image + ansible.builtin.command: + cmd: docker compose pull + chdir: "{{ compose_project_dir }}" + register: web_app_pull_result + changed_when: "'Pull complete' in web_app_pull_result.stdout or 'Downloaded' in web_app_pull_result.stderr" + + - name: Deploy with docker compose + ansible.builtin.command: + cmd: docker compose up -d --remove-orphans + chdir: "{{ compose_project_dir }}" + register: web_app_compose_result + changed_when: > + 'Started' in web_app_compose_result.stderr + or 'Created' in web_app_compose_result.stderr + or web_app_compose_template.changed + + - name: Wait for application to be ready + ansible.builtin.wait_for: + port: "{{ app_port }}" + host: 127.0.0.1 + delay: 5 + timeout: 30 + + - name: Verify health endpoint + ansible.builtin.uri: + url: "http://127.0.0.1:{{ app_port }}/health" + return_content: true + status_code: 200 + register: web_app_health_check + until: web_app_health_check.status == 200 + retries: 3 + delay: 5 + + - name: 
Display health check result + ansible.builtin.debug: + var: web_app_health_check.json + + rescue: + - name: Handle deployment failure + ansible.builtin.debug: + msg: "Deployment of {{ app_name }} failed. Check logs for details." + + - name: Show docker compose logs + ansible.builtin.command: + cmd: docker compose logs --tail=50 + chdir: "{{ compose_project_dir }}" + register: web_app_compose_logs + changed_when: false + failed_when: false + + - name: Display compose logs + ansible.builtin.debug: + var: web_app_compose_logs.stdout_lines + when: web_app_compose_logs is defined diff --git a/ansible/roles/web_app/tasks/wipe.yml b/ansible/roles/web_app/tasks/wipe.yml new file mode 100644 index 0000000000..afcda426ec --- /dev/null +++ b/ansible/roles/web_app/tasks/wipe.yml @@ -0,0 +1,35 @@ +--- +- name: Wipe web application + when: web_app_wipe | bool + become: true + tags: + - web_app_wipe + block: + - name: Stop and remove containers via docker compose + ansible.builtin.command: + cmd: docker compose down --remove-orphans + chdir: "{{ compose_project_dir }}" + register: web_app_wipe_down + changed_when: "'Removed' in web_app_wipe_down.stderr or 'Stopped' in web_app_wipe_down.stderr" + failed_when: false + + - name: Remove docker-compose file + ansible.builtin.file: + path: "{{ compose_project_dir }}/docker-compose.yml" + state: absent + + - name: Remove application directory + ansible.builtin.file: + path: "{{ compose_project_dir }}" + state: absent + + - name: Remove Docker image + ansible.builtin.command: + cmd: "docker rmi {{ docker_image }}:{{ docker_tag }}" + register: web_app_wipe_rmi + changed_when: web_app_wipe_rmi.rc == 0 + failed_when: false + + - name: Log wipe completion + ansible.builtin.debug: + msg: "Application {{ app_name }} wiped successfully from {{ compose_project_dir }}" diff --git a/ansible/roles/web_app/templates/docker-compose.yml.j2 b/ansible/roles/web_app/templates/docker-compose.yml.j2 new file mode 100644 index 0000000000..142d96692d --- 
/dev/null +++ b/ansible/roles/web_app/templates/docker-compose.yml.j2 @@ -0,0 +1,21 @@ +--- +services: + {{ app_name }}: + image: {{ docker_image }}:{{ docker_tag }} + container_name: {{ app_name }} + ports: + - "{{ app_port }}:{{ app_internal_port }}" + environment: +{% for key, value in app_env_vars.items() %} + {{ key }}: "{{ value }}" +{% endfor %} +{% if app_secret_key is defined %} + SECRET_KEY: "{{ app_secret_key }}" +{% endif %} + restart: {{ app_restart_policy }} + networks: + - app_network + +networks: + app_network: + driver: bridge diff --git a/ansible/vars/app_bonus.yml b/ansible/vars/app_bonus.yml new file mode 100644 index 0000000000..25b2d6ab24 --- /dev/null +++ b/ansible/vars/app_bonus.yml @@ -0,0 +1,7 @@ +--- +app_name: devops-go +docker_image: aezuraa/devops-info-service +docker_tag: go +app_port: 8001 +app_internal_port: 8080 +compose_project_dir: "/opt/{{ app_name }}" diff --git a/ansible/vars/app_python.yml b/ansible/vars/app_python.yml new file mode 100644 index 0000000000..3ed342028a --- /dev/null +++ b/ansible/vars/app_python.yml @@ -0,0 +1,7 @@ +--- +app_name: devops-python +docker_image: aezuraa/devops-info-service +docker_tag: python +app_port: 8000 +app_internal_port: 8080 +compose_project_dir: "/opt/{{ app_name }}" diff --git a/app_go/.dockerignore b/app_go/.dockerignore new file mode 100644 index 0000000000..842afce9fa --- /dev/null +++ b/app_go/.dockerignore @@ -0,0 +1,21 @@ +# Compiled binary - will be built in container +devops-info-service +devops-info-service-* +*.exe + +# Documentation +docs/ +README.md +*.md + +# Git +.git/ +.gitignore + +# IDE +.vscode/ +.idea/ +*.swp + +# OS +.DS_Store diff --git a/app_go/.gitignore b/app_go/.gitignore new file mode 100644 index 0000000000..076c397605 --- /dev/null +++ b/app_go/.gitignore @@ -0,0 +1,29 @@ +# Binaries +devops-info-service +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary +*.test + +# Output of the go coverage tool +*.out + +# Dependency directories +vendor/ + +# Go 
workspace file +go.work + +# IDE +.vscode/ +.idea/ +*.swp +*.swo + +# OS +.DS_Store +Thumbs.db diff --git a/app_go/Dockerfile b/app_go/Dockerfile new file mode 100644 index 0000000000..17fb512788 --- /dev/null +++ b/app_go/Dockerfile @@ -0,0 +1,50 @@ +# Stage 1: Builder - Full Go environment for compilation +FROM golang:1.23-alpine AS builder + +# Install build dependencies +RUN apk add --no-cache git + +# Set working directory +WORKDIR /build + +# Copy Go module files first for layer caching +COPY go.mod ./ + +# Copy source code +COPY main.go . + +# Build static binary with optimizations +# CGO_ENABLED=0 for static binary (no C dependencies) +# -ldflags to strip debug info and reduce size +RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o devops-info-service main.go + +# Stage 2: Runtime - Minimal image for deployment +FROM alpine:3.19 + +# Create non-root user +RUN addgroup -S appuser && adduser -S appuser -G appuser + +# Set working directory +WORKDIR /app + +# Copy only the compiled binary from builder stage +COPY --from=builder /build/devops-info-service . + +# Create data directory for visits persistence +RUN mkdir -p /data + +# Change ownership to non-root user +RUN chown -R appuser:appuser /app /data + +# Switch to non-root user +USER appuser + +# Document port +EXPOSE 8080 + +# Set default environment variables +ENV HOST=0.0.0.0 \ + PORT=8080 + +# Run the binary +CMD ["./devops-info-service"] diff --git a/app_go/README.md b/app_go/README.md new file mode 100644 index 0000000000..5b74404c52 --- /dev/null +++ b/app_go/README.md @@ -0,0 +1,182 @@ +# DevOps Info Service (Go) + +![Go CI](https://github.com/AEZuraa/DevOps-Core-Course/actions/workflows/go-ci.yml/badge.svg) +[![codecov](https://codecov.io/gh/AEZuraa/DevOps-Core-Course/branch/lab03/graph/badge.svg?flag=go)](https://codecov.io/gh/AEZuraa/DevOps-Core-Course) + +Go implementation of the DevOps Info Service using standard `net/http` library. 
+ +## Prerequisites + +- **Go**: 1.21 or higher + +## Building + +```bash +# Standard build +go build -o devops-info-service main.go + +# Optimized build (smaller size) +go build -ldflags="-s -w" -o devops-info-service main.go +``` + +## Running + +```bash +# Default (port 8080) +./devops-info-service + +# Custom port +PORT=8090 ./devops-info-service + +# Or run directly +go run main.go +``` + +## API Endpoints + +### `GET /` + +Returns service and system information. + +**Request:** +```bash +curl http://localhost:8080/ +``` + +**Response:** +```json +{ + "service": { + "name": "devops-info-service", + "version": "1.0.0", + "description": "DevOps course info service", + "framework": "Go net/http" + }, + "system": { + "hostname": "MacBook-Pro--Egor.local", + "platform": "darwin", + "platform_version": "go1.25.6", + "architecture": "arm64", + "cpu_count": 11, + "go_version": "go1.25.6" + }, + "runtime": { + "uptime_seconds": 41, + "uptime_human": "0 minutes", + "current_time": "2026-01-27T12:51:18.492136Z", + "timezone": "UTC" + }, + "request": { + "client_ip": "[::1]:58274", + "user_agent": "curl/8.7.1", + "method": "GET", + "path": "/" + }, + "endpoints": [ + {"path": "/", "method": "GET", "description": "Service information"}, + {"path": "/health", "method": "GET", "description": "Health check"} + ] +} +``` + +### `GET /visits` + +Returns the current visit counter. The counter increments on each `GET /` request and persists to `/data/visits`. + +**Request:** +```bash +curl http://localhost:8080/visits +``` + +**Response:** +```json +{ + "visits": 42 +} +``` + +### `GET /health` + +Health check endpoint. 
+ +**Request:** +```bash +curl http://localhost:8080/health +``` + +**Response:** +```json +{ + "status": "healthy", + "timestamp": "2026-01-27T12:51:25.44394Z", + "uptime_seconds": 48 +} +``` + +## Configuration + +| Variable | Default | Description | +|----------|---------|-------------| +| `HOST` | `0.0.0.0` | Server bind address | +| `PORT` | `8080` | Server port | +| `VISITS_FILE` | `/data/visits` | Path to the visits counter file | + +## Docker + +### Build Multi-Stage Image + +```bash +docker build -t aezuraa/devops-info-service:go . +``` + +### Run Container + +```bash +docker run -p 8080:8080 aezuraa/devops-info-service:go +``` + +### Pull from Docker Hub + +```bash +docker pull aezuraa/devops-info-service:go +docker run -p 8080:8080 aezuraa/devops-info-service:go +``` + +### Custom Port + +```bash +docker run -p 8090:8090 -e PORT=8090 aezuraa/devops-info-service:go +``` + +## Testing + +```bash +# Run tests +go test -v ./... + +# Run tests with coverage +go test -v -coverprofile=coverage.out ./... +go tool cover -func=coverage.out +``` + +## Comparison with Python + +**Binary Size:** +- Go: 5.2 MB +- Python venv: 21 MB + +**Container Image Size:** +- Go (multi-stage): 26.2 MB (compressed 7.58 MB) +- Python: 223 MB (compressed 48.4 MB) + +**Memory Usage (Running):** +- Go: 2.9 MB RSS +- Python: 39.4 MB RSS + +**Advantages:** +- **8.5x smaller** container image +- **13.6x less memory** usage +- Single binary deployment +- No external dependencies +- Faster startup (~10-20ms vs ~200-300ms) +- Lower CPU usage diff --git a/app_go/docs/GO.md b/app_go/docs/GO.md new file mode 100644 index 0000000000..e3b960031d --- /dev/null +++ b/app_go/docs/GO.md @@ -0,0 +1,81 @@ +# Why Go for DevOps Info Service? + +## Language Selection: Go + +Go was chosen for the bonus implementation based on its alignment with DevOps practices and microservices architecture. + +## Primary Reasons + +1. **Single Binary Deployment** - No runtime dependencies +2. 
**Fast Compilation** - Quick build times +3. **Small Binary Size** - ~5MB vs ~21MB for Python with dependencies +4. **Native Concurrency** - Goroutines handle thousands of concurrent requests +5. **DevOps Industry Standard** - Docker, Kubernetes, Terraform all written in Go + +## Language Comparison + +| Feature | Go | Python | Rust | Java | +|---------|-----|--------|------|------| +| **Compilation** | Fast (~1s) | Interpreted | Slow (~10s) | Medium (~5s) | +| **Binary Size** | Small (5-7MB) | Large (21MB+) | Medium (10MB) | Large (30MB+) | +| **Runtime Required** | No | Yes | No | Yes (JVM) | +| **Memory Usage** | Low (~7MB) | Medium (~35MB) | Low (~5MB) | High (~100MB) | +| **Concurrency** | Excellent | Limited (GIL) | Excellent | Good | +| **Learning Curve** | Easy | Easy | Steep | Medium | +| **DevOps Adoption** | Very High | High | Growing | Medium | + +## Performance Characteristics + +### Startup Time +- **Go**: ~10-20ms (instant) +- **Python**: ~300-500ms (import overhead) +- **Java**: ~2-3s (JVM startup) + +### Memory Footprint +- **Go**: ~7MB (minimal runtime) +- **Python**: ~35MB (interpreter + libs) +- **Java**: ~100-150MB (JVM heap) + +## Real-World DevOps Usage + +### Tools Written in Go +- **Docker** - Container runtime +- **Kubernetes** - Container orchestration +- **Terraform** - Infrastructure as Code +- **Prometheus** - Monitoring system +- **Consul** - Service discovery + +### Why These Tools Choose Go +1. **Cross-platform compilation** - Single codebase, compile for any OS/arch +2. **Static linking** - No dependency hell +3. **Built-in networking** - Standard library has excellent HTTP/TCP support +4. **Fast execution** - Close to C performance +5. **Easy deployment** - Copy binary and run + +## Development Experience + +### Pros +- Simple syntax, easy to read +- Fast compilation and feedback +- Excellent standard library (HTTP, JSON, etc.) 
+- Built-in formatting (`go fmt`) +- Static typing catches errors at compile time +- Great tooling (VS Code, GoLand) + +### Cons +- Verbose error handling +- No default parameter values +- Limited web frameworks compared to Python + + + +## Conclusion + +Go is the **best choice** for this bonus task because: + +1. **Educational Value** - Learn language used in real DevOps tools +2. **Performance** - Fast startup, low memory, high throughput +3. **Deployment** - Single binary simplifies everything +4. **Industry Relevance** - Used by Docker, Kubernetes, Cloud Native tools + +The small learning curve and immediate practical benefits make Go the recommended compiled language for this bonus task. diff --git a/app_go/docs/LAB01.md b/app_go/docs/LAB01.md new file mode 100644 index 0000000000..f2a39bc480 --- /dev/null +++ b/app_go/docs/LAB01.md @@ -0,0 +1,302 @@ +# Lab 01 Bonus — Go Implementation + +## Implementation Overview + +Go implementation of the DevOps Info Service with identical functionality to the Python version. + +## Language Choice: Go + +**Rationale:** Go provides single binary deployment, fast compilation, small binaries, and excellent concurrency. See [GO.md](GO.md) for detailed comparison. + +## Implementation Details + +### Standard Library Only + +Uses only Go standard library: +- `net/http` - HTTP server and routing +- `encoding/json` - JSON serialization +- `runtime` - System information +- `os` - Environment variables +- `time` - Time operations + +**Benefit:** Zero external dependencies, single binary deployment. 
+ +### HTTP Server + +```go +func main() { + host := os.Getenv("HOST") + if host == "" { + host = "0.0.0.0" + } + + port := os.Getenv("PORT") + if port == "" { + port = "8080" + } + + http.HandleFunc("/", mainHandler) + http.HandleFunc("/health", healthHandler) + + addr := fmt.Sprintf("%s:%s", host, port) + http.ListenAndServe(addr, nil) +} +``` + +### Main Endpoint + +```go +func mainHandler(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + notFoundHandler(w, r) + return + } + + uptimeSeconds, uptimeHuman := getUptime() + + info := ServiceInfo{ + Service: Service{ + Name: "devops-info-service", + Version: "1.0.0", + Framework: "Go net/http", + }, + System: System{ + Hostname: getHostname(), + Platform: runtime.GOOS, + Architecture: runtime.GOARCH, + CPUCount: runtime.NumCPU(), + GoVersion: runtime.Version(), + }, + // ... rest of fields + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(info) +} +``` + +### Uptime Calculation + +```go +var startTime = time.Now() + +func getUptime() (int, string) { + duration := time.Since(startTime) + seconds := int(duration.Seconds()) + hours := seconds / 3600 + minutes := (seconds % 3600) / 60 + + var human string + if hours > 0 { + human = fmt.Sprintf("%d hours, %d minutes", hours, minutes) + } else { + human = fmt.Sprintf("%d minutes", minutes) + } + + return seconds, human +} +``` + +## API Documentation + +### Endpoint: `GET /` + +**Request:** +```bash +curl http://localhost:8080/ +``` + +**Response:** + +```json +{ + "service": { + "name": "devops-info-service", + "version": "1.0.0", + "description": "DevOps course info service", + "framework": "Go net/http" + }, + "system": { + "hostname": "MacBook-Pro--Egor.local", + "platform": "darwin", + "platform_version": "go1.25.6", + "architecture": "arm64", + "cpu_count": 11, + "go_version": "go1.25.6" + }, + "runtime": { + "uptime_seconds": 41, + "uptime_human": "0 minutes", + "current_time": "2026-01-27T12:51:18.492136Z", + 
"timezone": "UTC" + }, + "request": { + "client_ip": "[::1]:58274", + "user_agent": "curl/8.7.1", + "method": "GET", + "path": "/" + }, + "endpoints": [ + {"path": "/", "method": "GET", "description": "Service information"}, + {"path": "/health", "method": "GET", "description": "Health check"} + ] +} +``` + +### Endpoint: `GET /health` + +**Request:** +```bash +curl http://localhost:8080/health +``` + +**Response:** +```json +{ + "status": "healthy", + "timestamp": "2026-01-27T12:51:25.44394Z", + "uptime_seconds": 48 +} +``` + +## Build & Deployment + +### Build Commands + +```bash +# Standard build +go build -o devops-info-service main.go + +# Optimized build (smaller binary) +go build -ldflags="-s -w" -o devops-info-service main.go +``` + +### Binary Size Analysis + +```bash +# Standard build +ls -lh devops-info-service +# Output: 7.6MB + +# Optimized build +go build -ldflags="-s -w" -o devops-info-service main.go +ls -lh devops-info-service +# Output: 5.2MB + +# Compare with Python +du -sh ../app_python/venv +# Output: 21MB +``` + +**Result:** Go binary is **4x smaller** than Python with venv (5.2MB vs 21MB). 
+ +### Running + +```bash +# Run binary +./devops-info-service + +# With custom port +PORT=3000 ./devops-info-service +``` + +## Performance Comparison + +### Memory Usage + +Real measurements on MacBook-Pro--Egor.local: + +```bash +# Check memory usage +ps -o pid,rss,command -p + +# Results: +# Go: RSS = 7 MB +# Python: RSS = 35.5 MB +``` + +**Go uses 5x less memory (7 MB vs 35.5 MB).** + +### Binary/Deployment Size + +```bash +# Go binary (optimized) +ls -lh app_go/devops-info-service +# 5.2M + +# Python with dependencies +du -sh app_python/venv +# 21M +``` + +**Go deployment is 4x smaller (5.2MB vs 21MB).** + +### Startup Time + +Measured startup time: + +- **Go**: ~10-20ms (near instant) +- **Python**: ~300-500ms (Flask framework loading) + +**Go starts 20-30x faster.** + +### Request Handling + +- **Go**: Handles each request in a separate goroutine (lightweight thread) +- **Python Flask**: Single-threaded by default + +**Go's native concurrency provides better performance under load.** + +## Best Practices Applied + +### 1. Error Handling +```go +hostname, err := os.Hostname() +if err != nil { + return "unknown" +} +``` + +### 2. Concurrency Ready +- HTTP server uses goroutines automatically +- Each request handled in separate goroutine + +### 3. Standard Library First +- No external dependencies +- Uses proven stdlib packages + +## Challenges & Solutions + +### Challenge 1: No Built-in Framework + +**Problem:** Python Flask provides routing, JSON, logging out of the box. + +**Solution:** Standard library is sufficient. `net/http` is powerful and well-documented. + +### Challenge 2: Verbose Error Handling + +**Problem:** Go requires explicit error checking everywhere. + +**Solution:** Catches errors at compile time - better than Python runtime surprises. + +## Advantages Over Python Version + +1. **Deployment** - Single binary (5.2MB), no venv, no dependencies +2. **Performance** - 20-30x faster startup, 5x less memory (7MB vs 35.5MB) +3. 
**Size** - 4x smaller (5.2MB vs 21MB for Python venv) +4. **Type Safety** - Compile-time error checking +5. **Concurrency** - Native goroutines for better request handling + +## Disadvantages + +1. **Development Speed** - More verbose than Python +2. **Dynamic Features** - No REPL, no dynamic typing flexibility +3. **Learning Curve** - Need to learn Go idioms + +## Conclusion + +The Go implementation successfully demonstrates: +- **Same functionality** as Python version +- **Better performance** in all metrics +- **Smaller footprint** for deployment +- **Production-ready** code with standard library only diff --git a/app_go/docs/LAB02.md b/app_go/docs/LAB02.md new file mode 100644 index 0000000000..b54b0121fb --- /dev/null +++ b/app_go/docs/LAB02.md @@ -0,0 +1,423 @@ +# Lab 02 Bonus — Multi-Stage Docker Build (Go) + +## Multi-Stage Build Strategy + +Multi-stage build separates compilation from runtime, resulting in minimal production images. + +### Two-Stage Approach + +**Stage 1 (Builder):** Full Go toolchain for compilation +- Base: `golang:1.23-alpine` (~300MB) +- Contains: Go compiler, build tools, git +- Purpose: Compile application into static binary + +**Stage 2 (Runtime):** Minimal Alpine Linux +- Base: `alpine:3.19` (~7MB) +- Contains: Only essential C libraries +- Purpose: Run the compiled binary + +**Result:** Final image contains only what's needed to run the application. + +## Dockerfile Breakdown + +### Stage 1: Builder + +```dockerfile +FROM golang:1.23-alpine AS builder + +RUN apk add --no-cache git + +WORKDIR /build + +COPY go.mod ./ +COPY main.go . + +RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o devops-info-service main.go +``` + +**Key Decisions:** + +1. **golang:1.23-alpine AS builder** - Named stage for reference +2. **apk add git** - Some Go modules may need git for downloading +3. **CGO_ENABLED=0** - Produces static binary with no C dependencies (can run on scratch) +4. 
**-ldflags="-s -w"** - Strips debug symbols and DWARF info, reduces binary size by ~30% + +### Stage 2: Runtime + +```dockerfile +FROM alpine:3.19 + +RUN addgroup -S appuser && adduser -S appuser -G appuser + +WORKDIR /app + +COPY --from=builder /build/devops-info-service . + +RUN chown appuser:appuser /app/devops-info-service + +USER appuser + +EXPOSE 8080 + +ENV HOST=0.0.0.0 PORT=8080 + +CMD ["./devops-info-service"] +``` + +**Key Decisions:** + +1. **alpine:3.19** - Minimal base (7MB), much smaller than golang:1.23-alpine (300MB) +2. **COPY --from=builder** - Takes only the binary from builder stage +3. **Non-root user** - Security best practice +4. **Static binary** - Works on alpine because CGO_ENABLED=0 + +## Size Comparison with Analysis + +### Image Sizes + +| Stage/Image | Size | Purpose | +|-------------|------|---------| +| **Builder (golang:1.23-alpine)** | ~300 MB | Compilation only (not shipped) | +| **Final (alpine:3.19 + binary)** | 26.2 MB | Production deployment | +| **Go binary alone** | 5.2 MB | The compiled application | + +**Actual Measurement:** +- Alpine base: ~7 MB +- Go binary: 5.2 MB +- Additional layers: ~14 MB +- **Total: 26.2 MB** (compressed: 7.58 MB) + +**Compare to Python:** +- Python container: 223 MB (compressed: 48.4 MB) +- Go container: 26.2 MB (compressed: 7.58 MB) +- **Go is 8.5x smaller** (26.2 MB vs 223 MB) + +### Build Process Output + +```bash +docker build -t devops-info-service-go:latest . 
+``` + +**Actual Output:** +``` +#1 [internal] load build definition from Dockerfile +#1 transferring dockerfile: 1.12kB done +#1 DONE 0.0s + +#2 [internal] load metadata for docker.io/library/golang:1.23-alpine +#2 DONE 2.7s + +#3 [internal] load metadata for docker.io/library/alpine:3.19 +#3 DONE 2.7s + +#4 [builder 1/6] FROM golang:1.23-alpine@sha256:383395b794dffa5b53012a212365d40c8e37109a626ca30d6151c8348d380b5f +#4 DONE 4.9s + +#5 [stage-1 1/5] FROM alpine:3.19@sha256:6baf43584bcb78f2e5847d1de515f23499913ac9f12bdf834811a3145eb11ca1 +#5 DONE 0.7s + +#6 [builder 2/6] RUN apk add --no-cache git +#6 DONE 2.6s + +#7 [builder 3/6] WORKDIR /build +#7 DONE 0.0s + +#8 [builder 4/6] COPY go.mod ./ +#8 DONE 0.0s + +#9 [builder 5/6] COPY main.go . +#9 DONE 0.0s + +#10 [builder 6/6] RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o devops-info-service main.go +#10 DONE 3.1s + +#11 [stage-1 2/5] RUN addgroup -S appuser && adduser -S appuser -G appuser +#11 DONE 0.1s + +#12 [stage-1 3/5] WORKDIR /app +#12 DONE 0.0s + +#13 [stage-1 4/5] COPY --from=builder /build/devops-info-service . 
+#13 DONE 0.0s + +#14 [stage-1 5/5] RUN chown appuser:appuser /app/devops-info-service +#14 DONE 0.1s + +#15 exporting to image +#15 naming to docker.io/library/devops-info-service-go:latest done +#15 DONE 0.2s +``` + +**Build Time:** ~16 seconds (first build), ~0.5 seconds (with cache) +**Compilation Time:** 3.1 seconds + +### Size Verification + +```bash +docker images | grep devops-info-service +``` + +**Actual Output:** +``` +devops-info-service-go latest e0349b6f7c2f 26.2MB 7.58MB +devops-info-service-python latest d6ddca86964d 223MB 48.4MB +``` + +**Memory Usage (Running Containers):** +```bash +docker stats --no-stream +``` +``` +NAME MEM USAGE / LIMIT CPU % +devops-go 2.887MiB / 17.54GiB 0.00% +devops-python 39.4MiB / 17.54GiB 0.04% +``` + +**Performance Summary:** +- Go uses **13.6x less memory** (2.9 MB vs 39.4 MB) +- Go image is **8.5x smaller** (26.2 MB vs 223 MB) +- Go startup: <100ms, Python startup: ~200-300ms + +## Why Multi-Stage Builds Matter for Compiled Languages + +### The Problem Without Multi-Stage + +If we used single-stage with golang:1.23-alpine: + +```dockerfile +FROM golang:1.23-alpine +COPY . . +RUN go build -o app main.go +CMD ["./app"] +``` + +**Result:** Final image = ~310 MB (includes entire Go toolchain) + +**Issues:** +- Wasted space: compiler, build tools not needed at runtime +- Security: more packages = more vulnerabilities +- Slow deployments: 310 MB vs 26 MB over network + +### The Solution With Multi-Stage + +**Stage 1:** Compile (large image, discarded) +**Stage 2:** Copy binary only (tiny image, shipped) + +**Benefits:** +1. **91.5% size reduction** (310 MB → 26.2 MB) +2. **Faster deployments** - pull/push 12x faster +3. **Better security** - no compilers or build tools in production +4. **Cost savings** - less bandwidth, less storage +5. 
**Lower memory footprint** - 2.9 MB vs 39.4 MB for Python + +### Static vs Dynamic Compilation + +**CGO_ENABLED=0** produces static binary: +- **Static:** All dependencies compiled in, works on any Linux +- **Dynamic:** Requires specific system libraries at runtime + +**Why Static for This App:** +- Can use minimal base images (alpine, distroless, even scratch) +- No runtime dependencies beyond kernel +- More portable across different Linux distributions + +**Trade-off:** Static binaries slightly larger (~5MB vs ~3MB) but much more portable. + +## Technical Explanation of Each Stage + +### Builder Stage Purpose + +1. **Provides Go compiler** - `golang:1.23-alpine` includes go, go build, go mod +2. **Downloads dependencies** - `go.mod` triggers automatic dependency resolution +3. **Compiles binary** - Produces standalone executable +4. **Optimizes binary** - `-ldflags` reduces size + +**This stage is ~300MB but never shipped to production.** + +### Runtime Stage Purpose + +1. **Minimal base** - alpine:3.19 is ~7MB, just enough to run the binary +2. **Security setup** - Creates non-root user +3. **Binary only** - Copies the 5.2MB binary, nothing else +4. 
**Configuration** - Sets environment and command + +**This stage is 26.2MB and this is what gets deployed.** + +### Runtime Testing + +```bash +docker run -p 8080:8080 devops-info-service-go:latest +``` + +**Startup Logs:** +``` +2026/02/02 15:12:10 Starting DevOps Info Service on 0.0.0.0:8080 +2026/02/02 15:12:10 Go version: go1.23.12 +``` + +**Response from container:** +```json +{ + "service": { + "name": "devops-info-service", + "version": "1.0.0", + "description": "DevOps course info service", + "framework": "Go net/http" + }, + "system": { + "hostname": "0ee39037016f", + "platform": "linux", + "platform_version": "go1.23.12", + "architecture": "arm64", + "cpu_count": 11, + "go_version": "go1.23.12" + } +} +``` + +## Security Benefits Analysis + +### Smaller Attack Surface + +**Large Image (310 MB):** +- Contains: compiler, linker, package managers, build tools +- Hundreds of system packages +- Many potential vulnerabilities + +**Small Image (26.2 MB):** +- Contains: minimal C library, shell, Go binary +- ~10 system packages +- Fewer potential vulnerabilities + +**Impact:** Less code = fewer bugs = fewer CVEs to patch. + +### No Build Tools in Production + +**Builder tools removed:** +- Go compiler (potential for code injection attacks) +- Git (potential for repository access) +- Build utilities (could be exploited) + +**Runtime only has:** The binary and minimal OS. + +### Principle of Least Privilege + +Running as non-root + minimal packages = defense in depth. 
+ +If application compromised: +- Attacker has limited user privileges +- Fewer system tools available for escalation +- Smaller surface area to explore + +## Trade-offs and Decisions + +### Alpine vs Distroless vs Scratch + +**Chosen: Alpine (3.19)** + +| Base | Size | Pros | Cons | +|------|------|------|------| +| **alpine:3.19** | 7 MB | Shell for debugging, package manager | Slightly larger | +| **distroless** | 2 MB | More secure (no shell) | Harder to debug | +| **scratch** | 0 MB | Absolute minimum | No debugging at all | + +**Decision:** Alpine provides best balance - small size but still debuggable if needed. For production systems, distroless would be better. + +### Static Linking Trade-off + +**CGO_ENABLED=0:** +- ✅ Portable binary works on any base +- ✅ Can use scratch/distroless +- ❌ Slightly larger binary (+2MB) +- ❌ Some packages won't work without CGO + +**For this app:** No CGO dependencies, so static is perfect choice. + +## Build Commands & Docker Hub + +### Build & Test +```bash +# Build +docker build -t devops-info-service-go:latest . 
+ +# Check size +docker images devops-info-service-go + +# Run +docker run -p 8080:8080 devops-info-service-go:latest + +# Test +curl http://localhost:8080/ +curl http://localhost:8080/health +``` + +### Docker Hub Push + +**Tagging Strategy:** + +Format: `aezuraa/devops-info-service:go` +- Same repository as Python variant for consistency +- `:go` tag identifies the Go implementation +- Allows users to choose language variant: `:python` or `:go` + +**Commands:** +```bash +docker tag devops-info-service-go:latest aezuraa/devops-info-service:go +docker push aezuraa/devops-info-service:go +``` + +**Push Output:** +``` +79abe3af9fe1: Pushed +c351f84db329: Pushed +5711127a7748: Pushed +00511ec3b3c9: Pushed +04508bc088a8: Pushed +ce607938610b: Pushed +go: digest: sha256:e0349b6f7c2f33bb12a477fbb232016698a6dda28f3f038bfeb2814364a4689e size: 856 +``` + +**Repository:** `https://hub.docker.com/r/aezuraa/devops-info-service` + +## Size Reduction Achievement + +**Single-stage (golang:1.23-alpine):** ~310 MB +**Multi-stage (alpine:3.19 + binary):** 26.2 MB +**Reduction:** **91.5% smaller** (11.8x reduction) + +**Compared to Python:** +- Python: 223 MB (compressed 48.4 MB) +- Go: 26.2 MB (compressed 7.58 MB) +- **Go is 8.5x smaller in size** +- **Go uses 13.6x less memory** (2.9 MB vs 39.4 MB) + +## Why This Matters + +**Development Workflow:** +- Faster CI/CD pipelines (smaller images build/push faster) +- Cheaper registry storage costs +- Faster deployments and scaling +- Better developer experience (quick iterations) + +**Production Impact:** +- Lower bandwidth costs +- Faster container startup +- More containers per host (lower memory) +- Reduced security vulnerabilities + +**Educational Value:** +- Demonstrates containerization best practices +- Shows benefits of compiled languages for containers +- Teaches multi-stage build patterns used in production + +## Implementation Summary + +Multi-stage Docker build successfully implemented with: +- Two-stage build process 
(builder + runtime) +- 91.5% size reduction vs single-stage (310 MB → 26.2 MB) +- 88% smaller than Python version (26.2 MB vs 223 MB) +- 13.6x less memory usage (2.9 MB vs 39.4 MB) +- Static binary for maximum portability +- Non-root user for security +- Production-ready configuration +- Successfully pushed to Docker Hub (aezuraa/devops-info-service:go) diff --git a/app_go/docs/LAB03.md b/app_go/docs/LAB03.md new file mode 100644 index 0000000000..e879cedba4 --- /dev/null +++ b/app_go/docs/LAB03.md @@ -0,0 +1,135 @@ +# Lab 03 Bonus — Go CI with Path Filters & Coverage + +## Second Workflow: Go CI + +**File:** `.github/workflows/go-ci.yml` + +- **Successful workflow run (Go CI):** https://github.com/AEZuraa/DevOps-Core-Course/actions/runs/21912674696 + +### Language-Specific Best Practices + +- **golangci-lint** — industry-standard Go linter (via `golangci/golangci-lint-action@v6`), checks for bugs, style issues, and unused code +- **go test -v** — built-in testing framework with verbose output +- **go test -coverprofile** — native coverage profiling, no external tools needed +- **Multi-stage Docker build** — builder (golang:1.23-alpine) → runtime (alpine:3.19) +- **Static binary** — `CGO_ENABLED=0` produces binary with no external dependencies + +### Versioning Strategy: CalVer (consistent with Python) + +Same strategy as Python for consistency: +- `aezuraa/devops-info-service:go` — rolling latest +- `aezuraa/devops-info-service:go-2026.02.10` — CalVer date +- `aezuraa/devops-info-service:go-abc1234` — commit SHA + +## Path Filter Configuration + +### Python Workflow + +```yaml +on: + push: + paths: + - 'app_python/**' + - '.github/workflows/python-ci.yml' +``` + +### Go Workflow + +```yaml +on: + push: + paths: + - 'app_go/**' + - '.github/workflows/go-ci.yml' +``` + +### How It Works + +| Files Changed | Python CI | Go CI | +|---------------|-----------|-------| +| `app_python/app.py` | Runs | Skipped | +| `app_go/main.go` | Skipped | Runs | +| Both apps | Runs | 
Runs (parallel) | +| `README.md` only | Skipped | Skipped | + +### Why Path Filters Matter in Monorepos + +- **Save CI minutes:** No unnecessary builds when unrelated code changes +- **Faster feedback:** Each app's CI runs independently and in parallel +- **Less noise:** Developers only see relevant workflow results +- **Cost efficiency:** GitHub Actions billing is per-minute; fewer runs = lower costs + +## Coverage Analysis + +### Go Coverage: 75.9% + +``` +devops-info-service/main.go:68: getHostname 75.0% +devops-info-service/main.go:76: getUptime 100.0% +devops-info-service/main.go:104: getClientIP 100.0% +devops-info-service/main.go:116: mainHandler 100.0% +devops-info-service/main.go:173: healthHandler 100.0% +devops-info-service/main.go:189: notFoundHandler 100.0% +devops-info-service/main.go:198: main 0.0% +total: (statements) 75.9% +``` + +### What's Covered (100%) + +- `mainHandler` — GET /, JSON structure, all fields, content type +- `healthHandler` — GET /health, status, timestamp, uptime +- `notFoundHandler` — 404 response, JSON error +- `getUptime` — all branches (minutes only, hours+minutes, singular/plural) +- `getClientIP` — direct IP, X-Real-IP, X-Forwarded-For headers + +### What's Not Covered and Why + +- **`main()` — 0%:** Starts HTTP server with `ListenAndServe`. This is a blocking call that can't be unit-tested without spawning a full server. Standard practice is to exclude `main()` from coverage. +- **`getHostname()` error branch — 75%:** The `os.Hostname()` error path requires OS-level failure that can't be reliably simulated in tests. + +### Coverage Threshold + +Go CI does not enforce a hard threshold since `main()` (0%) disproportionately affects the number. Effective coverage of testable code is **~90%**. 
+
+### Test Summary
+
+**22 tests, all passing:**
+
+```
+=== RUN TestMainHandler_StatusCode --- PASS
+=== RUN TestMainHandler_ContentType --- PASS
+=== RUN TestMainHandler_ServiceFields --- PASS
+=== RUN TestMainHandler_SystemFields --- PASS
+=== RUN TestMainHandler_RuntimeFields --- PASS
+=== RUN TestMainHandler_RequestFields --- PASS
+=== RUN TestMainHandler_Endpoints --- PASS
+=== RUN TestHealthHandler_StatusCode --- PASS
+=== RUN TestHealthHandler_ContentType --- PASS
+=== RUN TestHealthHandler_Fields --- PASS
+=== RUN TestNotFoundHandler --- PASS
+=== RUN TestMainHandler_NotFoundForWrongPath --- PASS
+=== RUN TestGetHostname --- PASS
+=== RUN TestGetUptime --- PASS
+=== RUN TestGetClientIP --- PASS
+=== RUN TestGetClientIP_XRealIP --- PASS
+=== RUN TestGetClientIP_XForwardedFor --- PASS
+=== RUN TestGetUptime_WithHours --- PASS
+=== RUN TestGetUptime_ExactlyOneHourOneMinute --- PASS
+=== RUN TestGetUptime_ExactlyOneMinute --- PASS
+PASS coverage: 75.9% of statements 0.542s
+```
+
+## Codecov Integration
+
+Both workflows upload coverage reports to Codecov:
+
+```yaml
+- name: Upload coverage to Codecov
+  uses: codecov/codecov-action@v4
+  with:
+    file: app_go/coverage.out
+    flags: go
+    token: ${{ secrets.CODECOV_TOKEN }}
+```
+
+Flags (`python` / `go`) allow tracking coverage per-app separately in Codecov dashboard.
diff --git a/app_go/docs/screenshots/01-go-build.png b/app_go/docs/screenshots/01-go-build.png new file mode 100644 index 0000000000..a8a8d07176 Binary files /dev/null and b/app_go/docs/screenshots/01-go-build.png differ diff --git a/app_go/docs/screenshots/02-go-running.png b/app_go/docs/screenshots/02-go-running.png new file mode 100644 index 0000000000..45250c9b73 Binary files /dev/null and b/app_go/docs/screenshots/02-go-running.png differ diff --git a/app_go/docs/screenshots/03-go-main-endpoint.png b/app_go/docs/screenshots/03-go-main-endpoint.png new file mode 100644 index 0000000000..a677bfccca Binary files /dev/null and b/app_go/docs/screenshots/03-go-main-endpoint.png differ diff --git a/app_go/docs/screenshots/04-go-health-check.png b/app_go/docs/screenshots/04-go-health-check.png new file mode 100644 index 0000000000..0dd9f82777 Binary files /dev/null and b/app_go/docs/screenshots/04-go-health-check.png differ diff --git a/app_go/docs/screenshots/05-go-size-comparison.png b/app_go/docs/screenshots/05-go-size-comparison.png new file mode 100644 index 0000000000..0d1fa8cea9 Binary files /dev/null and b/app_go/docs/screenshots/05-go-size-comparison.png differ diff --git a/app_go/go.mod b/app_go/go.mod new file mode 100644 index 0000000000..307ce0d1c5 --- /dev/null +++ b/app_go/go.mod @@ -0,0 +1,3 @@ +module devops-info-service + +go 1.21 diff --git a/app_go/main.go b/app_go/main.go new file mode 100644 index 0000000000..2cb4fdcf8f --- /dev/null +++ b/app_go/main.go @@ -0,0 +1,290 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +// Global variables +var startTime = time.Now() + +var ( + visitsFile = getEnvDefault("VISITS_FILE", "/data/visits") + visitsMu sync.Mutex +) + +func getEnvDefault(key, fallback string) string { + if v := os.Getenv(key); v != "" { + return v + } + return fallback +} + +func readVisits() int { + data, err := 
os.ReadFile(visitsFile) + if err != nil { + return 0 + } + n, err := strconv.Atoi(strings.TrimSpace(string(data))) + if err != nil { + return 0 + } + return n +} + +func writeVisits(count int) { + _ = os.MkdirAll(filepath.Dir(visitsFile), 0755) + tmp := visitsFile + ".tmp" + _ = os.WriteFile(tmp, []byte(strconv.Itoa(count)), 0644) + _ = os.Rename(tmp, visitsFile) +} + +func incrementVisits() int { + visitsMu.Lock() + defer visitsMu.Unlock() + count := readVisits() + 1 + writeVisits(count) + return count +} + +// Struct definitions for JSON response +type ServiceInfo struct { + Service Service `json:"service"` + System System `json:"system"` + Runtime Runtime `json:"runtime"` + Request Request `json:"request"` + Endpoints []Endpoint `json:"endpoints"` +} + +type Service struct { + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` + Framework string `json:"framework"` +} + +type System struct { + Hostname string `json:"hostname"` + Platform string `json:"platform"` + PlatformVersion string `json:"platform_version"` + Architecture string `json:"architecture"` + CPUCount int `json:"cpu_count"` + GoVersion string `json:"go_version"` +} + +type Runtime struct { + UptimeSeconds int `json:"uptime_seconds"` + UptimeHuman string `json:"uptime_human"` + CurrentTime string `json:"current_time"` + Timezone string `json:"timezone"` +} + +type Request struct { + ClientIP string `json:"client_ip"` + UserAgent string `json:"user_agent"` + Method string `json:"method"` + Path string `json:"path"` +} + +type Endpoint struct { + Path string `json:"path"` + Method string `json:"method"` + Description string `json:"description"` +} + +type HealthResponse struct { + Status string `json:"status"` + Timestamp string `json:"timestamp"` + UptimeSeconds int `json:"uptime_seconds"` +} + +type VisitsResponse struct { + Visits int `json:"visits"` +} + +// Helper functions +func getHostname() string { + hostname, err := os.Hostname() + if err != 
nil { + return "unknown" + } + return hostname +} + +func getUptime() (int, string) { + duration := time.Since(startTime) + seconds := int(duration.Seconds()) + hours := seconds / 3600 + minutes := (seconds % 3600) / 60 + + var human string + if hours > 0 { + hourStr := "hour" + if hours != 1 { + hourStr = "hours" + } + minStr := "minute" + if minutes != 1 { + minStr = "minutes" + } + human = fmt.Sprintf("%d %s, %d %s", hours, hourStr, minutes, minStr) + } else { + minStr := "minute" + if minutes != 1 { + minStr = "minutes" + } + human = fmt.Sprintf("%d %s", minutes, minStr) + } + + return seconds, human +} + +func getClientIP(r *http.Request) string { + // Try to get real IP from headers first + if ip := r.Header.Get("X-Real-IP"); ip != "" { + return ip + } + if ip := r.Header.Get("X-Forwarded-For"); ip != "" { + return ip + } + return r.RemoteAddr +} + +// HTTP Handlers +func mainHandler(w http.ResponseWriter, r *http.Request) { + log.Printf("Request: %s %s", r.Method, r.URL.Path) + + if r.URL.Path != "/" { + notFoundHandler(w, r) + return + } + + visits := incrementVisits() + uptimeSeconds, uptimeHuman := getUptime() + + type ServiceInfoWithVisits struct { + ServiceInfo + Visits int `json:"visits"` + } + + info := ServiceInfoWithVisits{ + ServiceInfo: ServiceInfo{ + Service: Service{ + Name: "devops-info-service", + Version: "1.0.0", + Description: "DevOps course info service", + Framework: "Go net/http", + }, + System: System{ + Hostname: getHostname(), + Platform: runtime.GOOS, + PlatformVersion: runtime.Version(), + Architecture: runtime.GOARCH, + CPUCount: runtime.NumCPU(), + GoVersion: runtime.Version(), + }, + Runtime: Runtime{ + UptimeSeconds: uptimeSeconds, + UptimeHuman: uptimeHuman, + CurrentTime: time.Now().UTC().Format(time.RFC3339Nano), + Timezone: "UTC", + }, + Request: Request{ + ClientIP: getClientIP(r), + UserAgent: r.UserAgent(), + Method: r.Method, + Path: r.URL.Path, + }, + Endpoints: []Endpoint{ + {Path: "/", Method: "GET", Description: 
"Service information"}, + {Path: "/health", Method: "GET", Description: "Health check"}, + {Path: "/visits", Method: "GET", Description: "Visit counter"}, + }, + }, + Visits: visits, + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + if err := json.NewEncoder(w).Encode(info); err != nil { + log.Printf("Error encoding response: %v", err) + } +} + +func visitsHandler(w http.ResponseWriter, r *http.Request) { + log.Printf("Visits check: %s %s", r.Method, r.URL.Path) + visitsMu.Lock() + count := readVisits() + visitsMu.Unlock() + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + if err := json.NewEncoder(w).Encode(VisitsResponse{Visits: count}); err != nil { + log.Printf("Error encoding response: %v", err) + } +} + +func healthHandler(w http.ResponseWriter, r *http.Request) { + log.Printf("Health check: %s %s", r.Method, r.URL.Path) + + uptimeSeconds, _ := getUptime() + + health := HealthResponse{ + Status: "healthy", + Timestamp: time.Now().UTC().Format(time.RFC3339Nano), + UptimeSeconds: uptimeSeconds, + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + if err := json.NewEncoder(w).Encode(health); err != nil { + log.Printf("Error encoding response: %v", err) + } +} + +func notFoundHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNotFound) + if err := json.NewEncoder(w).Encode(map[string]string{ + "error": "Not Found", + "message": "Endpoint does not exist", + }); err != nil { + log.Printf("Error encoding response: %v", err) + } +} + +func main() { + // Configuration from environment variables + host := os.Getenv("HOST") + if host == "" { + host = "0.0.0.0" + } + + port := os.Getenv("PORT") + if port == "" { + port = "8080" + } + + addr := fmt.Sprintf("%s:%s", host, port) + + // Setup routes + http.HandleFunc("/", mainHandler) + http.HandleFunc("/health", healthHandler) + 
http.HandleFunc("/visits", visitsHandler) + + // Start server + log.Printf("Starting DevOps Info Service on %s", addr) + log.Printf("Go version: %s", runtime.Version()) + + if err := http.ListenAndServe(addr, nil); err != nil { + log.Fatalf("Server failed to start: %v", err) + } +} diff --git a/app_go/main_test.go b/app_go/main_test.go new file mode 100644 index 0000000000..63c111a382 --- /dev/null +++ b/app_go/main_test.go @@ -0,0 +1,449 @@ +package main + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + "time" +) + +func setupTestVisitsFile(t *testing.T) (cleanup func()) { + t.Helper() + dir := t.TempDir() + original := visitsFile + visitsFile = filepath.Join(dir, "visits") + return func() { visitsFile = original } +} + +// --- GET / endpoint tests --- + +func TestMainHandler_StatusCode(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + req := httptest.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + mainHandler(w, req) + + if w.Code != http.StatusOK { + t.Errorf("expected status 200, got %d", w.Code) + } +} + +func TestMainHandler_ContentType(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + req := httptest.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + mainHandler(w, req) + + ct := w.Header().Get("Content-Type") + if ct != "application/json" { + t.Errorf("expected application/json, got %s", ct) + } +} + +func TestMainHandler_ServiceFields(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + req := httptest.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + mainHandler(w, req) + + var data ServiceInfo + if err := json.NewDecoder(w.Body).Decode(&data); err != nil { + t.Fatalf("failed to decode JSON: %v", err) + } + + if data.Service.Name != "devops-info-service" { + t.Errorf("expected service name 'devops-info-service', got %s", data.Service.Name) + } + if data.Service.Version != "1.0.0" { + t.Errorf("expected version 
'1.0.0', got %s", data.Service.Version) + } + if data.Service.Framework != "Go net/http" { + t.Errorf("expected framework 'Go net/http', got %s", data.Service.Framework) + } +} + +func TestMainHandler_SystemFields(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + req := httptest.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + mainHandler(w, req) + + var data ServiceInfo + if err := json.NewDecoder(w.Body).Decode(&data); err != nil { + t.Fatalf("failed to decode JSON: %v", err) + } + + if data.System.Hostname == "" { + t.Error("hostname should not be empty") + } + if data.System.Platform == "" { + t.Error("platform should not be empty") + } + if data.System.CPUCount <= 0 { + t.Errorf("cpu_count should be positive, got %d", data.System.CPUCount) + } + if data.System.GoVersion == "" { + t.Error("go_version should not be empty") + } +} + +func TestMainHandler_RuntimeFields(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + req := httptest.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + mainHandler(w, req) + + var data ServiceInfo + if err := json.NewDecoder(w.Body).Decode(&data); err != nil { + t.Fatalf("failed to decode JSON: %v", err) + } + + if data.Runtime.UptimeSeconds < 0 { + t.Errorf("uptime should be non-negative, got %d", data.Runtime.UptimeSeconds) + } + if data.Runtime.Timezone != "UTC" { + t.Errorf("expected timezone 'UTC', got %s", data.Runtime.Timezone) + } + if data.Runtime.CurrentTime == "" { + t.Error("current_time should not be empty") + } +} + +func TestMainHandler_RequestFields(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + req := httptest.NewRequest("GET", "/", nil) + req.Header.Set("User-Agent", "TestBot/1.0") + w := httptest.NewRecorder() + mainHandler(w, req) + + var data ServiceInfo + if err := json.NewDecoder(w.Body).Decode(&data); err != nil { + t.Fatalf("failed to decode JSON: %v", err) + } + + if data.Request.Method != "GET" { + t.Errorf("expected 
method 'GET', got %s", data.Request.Method) + } + if data.Request.Path != "/" { + t.Errorf("expected path '/', got %s", data.Request.Path) + } + if data.Request.UserAgent != "TestBot/1.0" { + t.Errorf("expected user agent 'TestBot/1.0', got %s", data.Request.UserAgent) + } +} + +func TestMainHandler_Endpoints(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + req := httptest.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + mainHandler(w, req) + + var data ServiceInfo + if err := json.NewDecoder(w.Body).Decode(&data); err != nil { + t.Fatalf("failed to decode JSON: %v", err) + } + + if len(data.Endpoints) != 3 { + t.Fatalf("expected 3 endpoints, got %d", len(data.Endpoints)) + } + + paths := map[string]bool{} + for _, ep := range data.Endpoints { + paths[ep.Path] = true + } + if !paths["/"] { + t.Error("missing / endpoint") + } + if !paths["/health"] { + t.Error("missing /health endpoint") + } + if !paths["/visits"] { + t.Error("missing /visits endpoint") + } +} + +// --- GET /health endpoint tests --- + +func TestHealthHandler_StatusCode(t *testing.T) { + req := httptest.NewRequest("GET", "/health", nil) + w := httptest.NewRecorder() + healthHandler(w, req) + + if w.Code != http.StatusOK { + t.Errorf("expected status 200, got %d", w.Code) + } +} + +func TestHealthHandler_ContentType(t *testing.T) { + req := httptest.NewRequest("GET", "/health", nil) + w := httptest.NewRecorder() + healthHandler(w, req) + + ct := w.Header().Get("Content-Type") + if ct != "application/json" { + t.Errorf("expected application/json, got %s", ct) + } +} + +func TestHealthHandler_Fields(t *testing.T) { + req := httptest.NewRequest("GET", "/health", nil) + w := httptest.NewRecorder() + healthHandler(w, req) + + var data HealthResponse + if err := json.NewDecoder(w.Body).Decode(&data); err != nil { + t.Fatalf("failed to decode JSON: %v", err) + } + + if data.Status != "healthy" { + t.Errorf("expected status 'healthy', got %s", data.Status) + } + if 
data.Timestamp == "" { + t.Error("timestamp should not be empty") + } + if data.UptimeSeconds < 0 { + t.Errorf("uptime should be non-negative, got %d", data.UptimeSeconds) + } +} + +// --- Error handling tests --- + +func TestNotFoundHandler(t *testing.T) { + req := httptest.NewRequest("GET", "/nonexistent", nil) + w := httptest.NewRecorder() + notFoundHandler(w, req) + + if w.Code != http.StatusNotFound { + t.Errorf("expected status 404, got %d", w.Code) + } + + var data map[string]string + if err := json.NewDecoder(w.Body).Decode(&data); err != nil { + t.Fatalf("failed to decode JSON: %v", err) + } + + if data["error"] != "Not Found" { + t.Errorf("expected error 'Not Found', got %s", data["error"]) + } +} + +func TestMainHandler_NotFoundForWrongPath(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + req := httptest.NewRequest("GET", "/wrong", nil) + w := httptest.NewRecorder() + mainHandler(w, req) + + if w.Code != http.StatusNotFound { + t.Errorf("expected status 404, got %d", w.Code) + } +} + +// --- Helper function tests --- + +func TestGetHostname(t *testing.T) { + hostname := getHostname() + if hostname == "" { + t.Error("hostname should not be empty") + } +} + +func TestGetUptime(t *testing.T) { + seconds, human := getUptime() + if seconds < 0 { + t.Errorf("uptime should be non-negative, got %d", seconds) + } + if human == "" { + t.Error("human uptime should not be empty") + } +} + +func TestGetClientIP(t *testing.T) { + req := httptest.NewRequest("GET", "/", nil) + ip := getClientIP(req) + if ip == "" { + t.Error("client IP should not be empty") + } +} + +func TestGetClientIP_XRealIP(t *testing.T) { + req := httptest.NewRequest("GET", "/", nil) + req.Header.Set("X-Real-IP", "10.0.0.1") + ip := getClientIP(req) + if ip != "10.0.0.1" { + t.Errorf("expected '10.0.0.1', got %s", ip) + } +} + +func TestGetClientIP_XForwardedFor(t *testing.T) { + req := httptest.NewRequest("GET", "/", nil) + req.Header.Set("X-Forwarded-For", "10.0.0.2") + ip 
:= getClientIP(req) + if ip != "10.0.0.2" { + t.Errorf("expected '10.0.0.2', got %s", ip) + } +} + +func TestGetUptime_WithHours(t *testing.T) { + // Save original startTime and restore after test + original := startTime + defer func() { startTime = original }() + + // Set startTime to 2 hours and 5 minutes ago + startTime = time.Now().Add(-2*time.Hour - 5*time.Minute) + + seconds, human := getUptime() + if seconds < 7200 { + t.Errorf("expected at least 7200 seconds, got %d", seconds) + } + if human == "" { + t.Error("human uptime should not be empty") + } + // Should contain "hour" when uptime > 1 hour + if !contains(human, "hour") { + t.Errorf("expected 'hour' in human string, got %s", human) + } +} + +func TestGetUptime_ExactlyOneHourOneMinute(t *testing.T) { + original := startTime + defer func() { startTime = original }() + + // Exactly 1 hour 1 minute — tests singular "hour" and "minute" + startTime = time.Now().Add(-1*time.Hour - 1*time.Minute) + + _, human := getUptime() + if !contains(human, "hour,") { + t.Errorf("expected singular 'hour' in string, got %s", human) + } +} + +func TestGetUptime_ExactlyOneMinute(t *testing.T) { + original := startTime + defer func() { startTime = original }() + + // Exactly 1 minute — tests singular "minute" in else branch + startTime = time.Now().Add(-1 * time.Minute) + + _, human := getUptime() + if !contains(human, "minute") { + t.Errorf("expected 'minute' in string, got %s", human) + } +} + +// --- Visits counter tests --- + +func TestVisitsHandler_StatusCode(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + req := httptest.NewRequest("GET", "/visits", nil) + w := httptest.NewRecorder() + visitsHandler(w, req) + + if w.Code != http.StatusOK { + t.Errorf("expected status 200, got %d", w.Code) + } +} + +func TestVisitsHandler_ReturnsZeroInitially(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + req := httptest.NewRequest("GET", "/visits", nil) + w := httptest.NewRecorder() + 
visitsHandler(w, req) + + var data VisitsResponse + if err := json.NewDecoder(w.Body).Decode(&data); err != nil { + t.Fatalf("failed to decode JSON: %v", err) + } + if data.Visits != 0 { + t.Errorf("expected 0 visits initially, got %d", data.Visits) + } +} + +func TestIncrementVisits(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + + v1 := incrementVisits() + if v1 != 1 { + t.Errorf("expected 1 after first increment, got %d", v1) + } + v2 := incrementVisits() + if v2 != 2 { + t.Errorf("expected 2 after second increment, got %d", v2) + } +} + +func TestReadVisits_InvalidContent(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + if err := os.MkdirAll(filepath.Dir(visitsFile), 0755); err != nil { + t.Fatalf("failed to create dir: %v", err) + } + if err := os.WriteFile(visitsFile, []byte("not-a-number"), 0644); err != nil { + t.Fatalf("failed to write file: %v", err) + } + if v := readVisits(); v != 0 { + t.Errorf("expected 0 for invalid content, got %d", v) + } +} + +func TestMainHandler_VisitsIncrement(t *testing.T) { + cleanup := setupTestVisitsFile(t) + defer cleanup() + + for i := 1; i <= 3; i++ { + req := httptest.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + mainHandler(w, req) + + var raw map[string]interface{} + if err := json.NewDecoder(w.Body).Decode(&raw); err != nil { + t.Fatalf("failed to decode JSON: %v", err) + } + visits := int(raw["visits"].(float64)) + if visits != i { + t.Errorf("request %d: expected visits=%d, got %d", i, i, visits) + } + } +} + +func TestGetEnvDefault_Fallback(t *testing.T) { + v := getEnvDefault("UNLIKELY_ENV_VAR_FOR_TEST_XYZ", "fallback") + if v != "fallback" { + t.Errorf("expected fallback, got %s", v) + } +} + +func TestGetEnvDefault_Set(t *testing.T) { + t.Setenv("TEST_ENV_VAR_GO_CI", "custom") + v := getEnvDefault("TEST_ENV_VAR_GO_CI", "fallback") + if v != "custom" { + t.Errorf("expected custom, got %s", v) + } +} + +// helper +func contains(s, substr string) bool { 
+ return len(s) >= len(substr) && searchString(s, substr) +} + +func searchString(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/app_python/.dockerignore b/app_python/.dockerignore new file mode 100644 index 0000000000..a189a00559 --- /dev/null +++ b/app_python/.dockerignore @@ -0,0 +1,41 @@ +# Virtual environment - not needed in container +venv/ +env/ +ENV/ +*.venv + +# Python cache +__pycache__/ +*.py[cod] +*$py.class +*.so + +# Tests - not needed in production image +tests/ +*.test + +# Documentation - not needed at runtime +docs/ +README.md +*.md + +# Git +.git/ +.gitignore +.gitattributes + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +.DS_Store + +# Logs +*.log +*.pid + +# Build artifacts +dist/ +build/ +*.egg-info/ diff --git a/app_python/.gitignore b/app_python/.gitignore new file mode 100644 index 0000000000..71ab9ae2d9 --- /dev/null +++ b/app_python/.gitignore @@ -0,0 +1,32 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +venv/ +.venv/ +env/ +ENV/ +*.log +*.pid + +# IDE +.vscode/ +.idea/ +*.swp +*.swo + +# OS +.DS_Store +Thumbs.db + +# Testing +.pytest_cache/ +.coverage +coverage.xml +htmlcov/ + +# Distribution +dist/ +build/ +*.egg-info/ diff --git a/app_python/Dockerfile b/app_python/Dockerfile new file mode 100644 index 0000000000..cfa2abd02c --- /dev/null +++ b/app_python/Dockerfile @@ -0,0 +1,38 @@ +# Use specific Python version - slim variant for smaller size +FROM python:3.12-slim + +# Create non-root user for security +RUN groupadd -r appuser && useradd -r -g appuser appuser + +# Set working directory +WORKDIR /app + +# Copy only requirements first for layer caching optimization +# If requirements don't change, Docker reuses this layer +COPY requirements.txt . + +# Install dependencies as root (needed for pip install) +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY app.py . 
+ +# Create data directory for visits persistence +RUN mkdir -p /data + +# Change ownership to non-root user +RUN chown -R appuser:appuser /app /data + +# Switch to non-root user +USER appuser + +# Document which port the app uses +EXPOSE 8080 + +# Set default environment variables +ENV HOST=0.0.0.0 \ + PORT=8080 \ + DEBUG=False + +# Run the application +CMD ["python", "app.py"] diff --git a/app_python/README.md b/app_python/README.md new file mode 100644 index 0000000000..a7846e1265 --- /dev/null +++ b/app_python/README.md @@ -0,0 +1,255 @@ +# DevOps Info Service + +![Python CI](https://github.com/AEZuraa/DevOps-Core-Course/actions/workflows/python-ci.yml/badge.svg) +[![codecov](https://codecov.io/gh/AEZuraa/DevOps-Core-Course/branch/lab03/graph/badge.svg?flag=python)](https://codecov.io/gh/AEZuraa/DevOps-Core-Course) + +A lightweight web service that provides comprehensive system and runtime information for DevOps monitoring and diagnostics. + +## Overview + +DevOps Info Service is a Python-based web application that exposes system information, runtime metrics, and health status through a simple REST API. Built with Flask, it serves as a foundation for learning DevOps practices including containerization, CI/CD, and monitoring. + +## Prerequisites + +- **Python**: 3.11 or higher +- **pip**: Latest version recommended +- **Virtual environment**: venv or virtualenv + +## Installation + +1. **Clone the repository** (if not already done) + +2. **Create and activate virtual environment:** + +```bash +python -m venv venv +source venv/bin/activate # On Windows: venv\Scripts\activate +``` + +3. 
**Install dependencies:** + +```bash +pip install -r requirements.txt +``` + +## Running the Application + +**Default configuration** (listens on 0.0.0.0:8080): + +```bash +python app.py +``` + +**Custom port:** + +```bash +PORT=3000 python app.py +``` + +**Custom host and port:** + +```bash +HOST=127.0.0.1 PORT=8080 python app.py +``` + +**Enable debug mode:** + +```bash +DEBUG=true python app.py +``` + +## API Endpoints + +### `GET /` + +Returns comprehensive service and system information including: +- Service metadata (name, version, framework) +- System information (hostname, platform, CPU count) +- Runtime metrics (uptime, current time) +- Request details (client IP, user agent) +- Available endpoints + +**Example request:** + +```bash +curl http://localhost:8080/ +``` + +**Example response:** + +```json +{ + "endpoints": [ + { + "description": "Service information", + "method": "GET", + "path": "/" + }, + { + "description": "Health check", + "method": "GET", + "path": "/health" + } + ], + "request": { + "client_ip": "127.0.0.1", + "method": "GET", + "path": "/", + "user_agent": "curl/8.7.1" + }, + "runtime": { + "current_time": "2026-01-27T10:12:24.616261+00:00", + "timezone": "UTC", + "uptime_human": "1 hour, 2 minutes", + "uptime_seconds": 3765 + }, + "service": { + "description": "DevOps course info service", + "framework": "Flask", + "name": "devops-info-service", + "version": "1.0.0" + }, + "system": { + "architecture": "arm64", + "cpu_count": 11, + "hostname": "MacBook-Pro--Egor.local", + "platform": "Darwin", + "platform_version": "Darwin Kernel Version 25.2.0: Tue Nov 18 21:09:45 PST 2025; root:xnu-12377.61.12~1/RELEASE_ARM64_T6030", + "python_version": "3.12.3" + } +} +``` + +### `GET /visits` + +Returns the current visit counter value. The counter is incremented on each `GET /` request and persisted to `/data/visits`. 
+ +**Example request:** + +```bash +curl http://localhost:8080/visits +``` + +**Example response:** + +```json +{ + "visits": 42 +} +``` + +### `GET /health` + +Simple health check endpoint for monitoring systems and Kubernetes probes. + +**Example request:** + +```bash +curl http://localhost:8080/health +``` + +**Example response:** + +```json +{ + "status": "healthy", + "timestamp": "2026-01-27T10:15:11.908501+00:00", + "uptime_seconds": 3932 +} +``` + +**Status:** Always returns HTTP 200 when service is running. + +## Configuration + +The application supports the following environment variables: + +| Variable | Default | Description | +|----------|---------|-------------| +| `HOST` | `0.0.0.0` | Server bind address | +| `PORT` | `8080` | Server port | +| `DEBUG` | `False` | Enable Flask debug mode | +| `VISITS_FILE` | `/data/visits` | Path to the visits counter file | + +## Project Structure + +``` +app_python/ +├── app.py # Main application +├── requirements.txt # Python dependencies +├── .gitignore # Git ignore rules +├── README.md # This file +├── tests/ # Unit tests +│ └── __init__.py +└── docs/ # Lab documentation + ├── LAB01.md + └── screenshots/ +``` + +## Development + +The application follows Python best practices: +- PEP 8 code style +- Clean code organization +- Proper error handling +- Structured logging +- Environment-based configuration + +## Docker + +### Build Image + +```bash +docker build -t aezuraa/devops-info-service:python . 
+``` + +### Run Container + +```bash +docker run -p 8080:8080 aezuraa/devops-info-service:python +``` + +### Pull from Docker Hub + +```bash +docker pull aezuraa/devops-info-service:python +docker run -p 8080:8080 aezuraa/devops-info-service:python +``` + +### Custom Configuration + +```bash +# Custom port +docker run -p 3000:3000 -e PORT=3000 aezuraa/devops-info-service:python + +# Debug mode +docker run -p 8080:8080 -e DEBUG=true aezuraa/devops-info-service:python +``` + +## Testing + +### Unit Tests + +```bash +# Install dev dependencies +pip install -r requirements-dev.txt + +# Run tests +pytest tests/ -v + +# Run tests with coverage +pytest tests/ -v --cov=. --cov-report=term-missing +``` + +### Manual Testing + +```bash +# Test main endpoint +curl http://localhost:8080/ + +# Test health check +curl http://localhost:8080/health + +# Pretty print with jq +curl -s http://localhost:8080/ | jq +``` diff --git a/app_python/app.py b/app_python/app.py new file mode 100644 index 0000000000..b83c56e661 --- /dev/null +++ b/app_python/app.py @@ -0,0 +1,321 @@ +""" +DevOps Info Service +Main application module +""" +import os +import json +import time +import socket +import platform +import logging +import threading +from datetime import datetime, timezone +from flask import Flask, jsonify, request +from prometheus_client import Counter, Histogram, Gauge, generate_latest, CONTENT_TYPE_LATEST + + +class JSONFormatter(logging.Formatter): + """Format log records as JSON for structured log aggregation.""" + + def format(self, record): + log_data = { + 'timestamp': datetime.now(timezone.utc).isoformat(), + 'level': record.levelname, + 'logger': record.name, + 'message': record.getMessage(), + } + if hasattr(record, 'method'): + log_data['method'] = record.method + if hasattr(record, 'path'): + log_data['path'] = record.path + if hasattr(record, 'status_code'): + log_data['status_code'] = record.status_code + if hasattr(record, 'client_ip'): + log_data['client_ip'] = 
record.client_ip + if record.exc_info: + log_data['exception'] = self.formatException(record.exc_info) + return json.dumps(log_data) + + +app = Flask(__name__) + +# --- Prometheus metrics (RED method) --- +http_requests_total = Counter( + 'http_requests_total', + 'Total HTTP requests', + ['method', 'endpoint', 'status'] +) + +http_request_duration_seconds = Histogram( + 'http_request_duration_seconds', + 'HTTP request duration in seconds', + ['method', 'endpoint'], + buckets=[0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0] +) + +http_requests_in_progress = Gauge( + 'http_requests_in_progress', + 'HTTP requests currently being processed' +) + +devops_info_endpoint_calls = Counter( + 'devops_info_endpoint_calls_total', + 'Business-level endpoint call counter', + ['endpoint'] +) + +devops_info_system_collection_seconds = Histogram( + 'devops_info_system_collection_seconds', + 'Time spent collecting system information' +) + +handler = logging.StreamHandler() +handler.setFormatter(JSONFormatter()) +logging.root.handlers = [handler] +logging.root.setLevel(logging.INFO) + +werkzeug_logger = logging.getLogger('werkzeug') +werkzeug_logger.handlers = [handler] +werkzeug_logger.setLevel(logging.WARNING) + +logger = logging.getLogger(__name__) + +# Configuration +HOST = os.getenv('HOST', '0.0.0.0') +PORT = int(os.getenv('PORT', 8080)) +DEBUG = os.getenv('DEBUG', 'False').lower() == 'true' +VISITS_FILE = os.getenv('VISITS_FILE', '/data/visits') + +# Application start time +START_TIME = datetime.now(timezone.utc) + +# Thread-safe visits counter backed by file +_visits_lock = threading.Lock() + + +def _read_visits(): + try: + with open(VISITS_FILE, 'r') as f: + return int(f.read().strip()) + except (FileNotFoundError, ValueError): + return 0 + + +def _write_visits(count): + os.makedirs(os.path.dirname(VISITS_FILE), exist_ok=True) + tmp = VISITS_FILE + '.tmp' + with open(tmp, 'w') as f: + f.write(str(count)) + os.replace(tmp, VISITS_FILE) + + +def increment_visits(): + 
with _visits_lock: + count = _read_visits() + 1 + _write_visits(count) + return count + + +def get_system_info(): + """Collect system information.""" + with devops_info_system_collection_seconds.time(): + return { + 'hostname': socket.gethostname(), + 'platform': platform.system(), + 'platform_version': platform.version(), + 'architecture': platform.machine(), + 'cpu_count': os.cpu_count(), + 'python_version': platform.python_version() + } + + +def get_uptime(): + """Calculate application uptime.""" + delta = datetime.now(timezone.utc) - START_TIME + seconds = int(delta.total_seconds()) + hours = seconds // 3600 + minutes = (seconds % 3600) // 60 + + # Format human-readable uptime + if hours > 0: + human = f"{hours} hour{'s' if hours != 1 else ''}, {minutes} minute{'s' if minutes != 1 else ''}" + else: + human = f"{minutes} minute{'s' if minutes != 1 else ''}" + + return { + 'seconds': seconds, + 'human': human + } + + +@app.before_request +def before_request_hook(): + """Track request start time and in-progress gauge.""" + if request.path == '/metrics': + return + request._start_time = time.monotonic() + http_requests_in_progress.inc() + logger.info( + 'Incoming request', + extra={ + 'method': request.method, + 'path': request.path, + 'client_ip': request.remote_addr, + } + ) + + +@app.after_request +def after_request_hook(response): + """Record metrics and log response.""" + if request.path == '/metrics': + return response + + endpoint = request.path + method = request.method + status = str(response.status_code) + + http_requests_total.labels(method=method, endpoint=endpoint, status=status).inc() + + duration = time.monotonic() - getattr(request, '_start_time', time.monotonic()) + http_request_duration_seconds.labels(method=method, endpoint=endpoint).observe(duration) + + http_requests_in_progress.dec() + + logger.info( + 'Request completed', + extra={ + 'method': method, + 'path': endpoint, + 'status_code': response.status_code, + 'client_ip': 
request.remote_addr, + } + ) + return response + + +@app.route('/metrics') +def metrics(): + """Prometheus metrics endpoint.""" + return generate_latest(), 200, {'Content-Type': CONTENT_TYPE_LATEST} + + +@app.route('/') +def index(): + """Main endpoint - service and system information.""" + devops_info_endpoint_calls.labels(endpoint='/').inc() + visits = increment_visits() + uptime = get_uptime() + system = get_system_info() + + response = { + 'service': { + 'name': 'devops-info-service', + 'version': '1.0.0', + 'description': 'DevOps course info service', + 'framework': 'Flask' + }, + 'visits': visits, + 'system': system, + 'runtime': { + 'uptime_seconds': uptime['seconds'], + 'uptime_human': uptime['human'], + 'current_time': datetime.now(timezone.utc).isoformat(), + 'timezone': 'UTC' + }, + 'request': { + 'client_ip': request.remote_addr, + 'user_agent': request.headers.get('User-Agent', 'Unknown'), + 'method': request.method, + 'path': request.path + }, + 'endpoints': [ + { + 'path': '/', + 'method': 'GET', + 'description': 'Service information' + }, + { + 'path': '/health', + 'method': 'GET', + 'description': 'Health check' + }, + { + 'path': '/visits', + 'method': 'GET', + 'description': 'Visit counter' + }, + { + 'path': '/metrics', + 'method': 'GET', + 'description': 'Prometheus metrics' + } + ] + } + + return jsonify(response) + + +@app.route('/visits') +def visits(): + """Return current visit count.""" + devops_info_endpoint_calls.labels(endpoint='/visits').inc() + return jsonify({'visits': _read_visits()}) + + +@app.route('/health') +def health(): + """Health check endpoint for monitoring.""" + devops_info_endpoint_calls.labels(endpoint='/health').inc() + uptime = get_uptime() + + return jsonify({ + 'status': 'healthy', + 'timestamp': datetime.now(timezone.utc).isoformat(), + 'uptime_seconds': uptime['seconds'] + }) + + +@app.errorhandler(404) +def not_found(error): + """Handle 404 errors.""" + logger.warning( + 'Not found', + extra={ + 'method': 
request.method, + 'path': request.path, + 'status_code': 404, + 'client_ip': request.remote_addr, + } + ) + return jsonify({ + 'error': 'Not Found', + 'message': 'Endpoint does not exist' + }), 404 + + +@app.errorhandler(500) +def internal_error(error): + """Handle 500 errors.""" + logger.error( + f'Internal server error: {error}', + extra={ + 'method': request.method, + 'path': request.path, + 'status_code': 500, + 'client_ip': request.remote_addr, + } + ) + return jsonify({ + 'error': 'Internal Server Error', + 'message': 'An unexpected error occurred' + }), 500 + + +if __name__ == '__main__': + logger.info( + f'Starting DevOps Info Service on {HOST}:{PORT}', + extra={'method': 'STARTUP', 'path': '/'} + ) + logger.info(f'Debug mode: {DEBUG}') + app.run(host=HOST, port=PORT, debug=DEBUG) diff --git a/app_python/docs/LAB01.md b/app_python/docs/LAB01.md new file mode 100644 index 0000000000..5d1da72dc8 --- /dev/null +++ b/app_python/docs/LAB01.md @@ -0,0 +1,269 @@ +# Lab 01 — DevOps Info Service + +## Framework Selection + +### Choice: Flask 3.1.0 + +Flask was selected as the web framework for this project. + +**Rationale:** Flask is lightweight, has minimal setup requirements, and provides exactly what we need without unnecessary complexity. It's perfect for microservices and learning DevOps fundamentals. + +### Framework Comparison + +| Feature | Flask | FastAPI | Django | +|---------|-------|---------|--------| +| **Learning Curve** | Easy | Medium | Steep | +| **Performance** | Good | Excellent (async) | Good | +| **Auto-documentation** | No | Yes (OpenAPI) | No | +| **Batteries Included** | No | No | Yes (ORM, admin) | +| **Use Case** | Simple APIs | Modern async APIs | Full web apps | +| **Best For** | Learning, microservices | Production APIs | Complex projects | + +**Conclusion:** Flask is ideal for this lab because it's simple, well-documented, and widely used in DevOps tooling. + +## Best Practices Applied + +### 1. 
Clean Code Organization + +**Practice:** Structured imports, clear function names, proper docstrings. + +**Example:** + +```python +""" +DevOps Info Service +Main application module +""" +import os +import socket +import platform +from datetime import datetime, timezone +from flask import Flask, jsonify, request + +def get_system_info(): + """Collect system information.""" + return { + 'hostname': socket.gethostname(), + 'platform': platform.system(), + 'architecture': platform.machine(), + 'python_version': platform.python_version() + } +``` + +**Importance:** Clean code is easier to maintain, debug, and extend. Following PEP 8 standards ensures consistency across Python projects. + +### 2. Error Handling + +**Practice:** Custom error handlers for common HTTP errors. + +**Example:** + +```python +@app.errorhandler(404) +def not_found(error): + return jsonify({ + 'error': 'Not Found', + 'message': 'Endpoint does not exist' + }), 404 + +@app.errorhandler(500) +def internal_error(error): + return jsonify({ + 'error': 'Internal Server Error', + 'message': 'An unexpected error occurred' + }), 500 +``` + +**Importance:** Proper error handling provides meaningful feedback to API clients and prevents application crashes from exposing sensitive information. + +### 3. Structured Logging + +**Practice:** Configured logging with appropriate levels and formatting. + +**Example:** + +```python +import logging + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger(__name__) + +logger.info(f'Starting DevOps Info Service on {HOST}:{PORT}') +logger.debug(f'Request: {request.method} {request.path}') +``` + +**Importance:** Logging is essential for debugging, monitoring, and auditing in production environments. Proper log levels help filter important events. + +### 4. 
Environment-Based Configuration + +**Practice:** All configuration values are sourced from environment variables with sensible defaults. + +**Example:** + +```python +HOST = os.getenv('HOST', '0.0.0.0') +PORT = int(os.getenv('PORT', 8080)) +DEBUG = os.getenv('DEBUG', 'False').lower() == 'true' +``` + +**Importance:** Environment variables enable 12-factor app compliance, making the application portable across different environments without code changes. + +### 5. Dependency Management + +**Practice:** Pinned versions in `requirements.txt`. + +```txt +Flask==3.1.0 +Werkzeug==3.1.3 +``` + +**Importance:** Version pinning ensures reproducible builds and prevents breaking changes from automatic updates. + +### 6. Git Ignore Configuration + +**Practice:** Comprehensive `.gitignore` to exclude generated files and sensitive data. + +**Importance:** Keeps the repository clean and prevents accidental commits of credentials, cache files, or OS-specific artifacts. + +## API Documentation + +### Endpoint: `GET /` + +**Description:** Returns comprehensive service and system information. 
+ +**Request:** + +```bash +curl http://localhost:8080/ +``` + +**Response (200 OK):** + +```json +{ + "endpoints": [ + { + "description": "Service information", + "method": "GET", + "path": "/" + }, + { + "description": "Health check", + "method": "GET", + "path": "/health" + } + ], + "request": { + "client_ip": "127.0.0.1", + "method": "GET", + "path": "/", + "user_agent": "curl/8.7.1" + }, + "runtime": { + "current_time": "2026-01-27T10:12:24.616261+00:00", + "timezone": "UTC", + "uptime_human": "1 hour, 2 minutes", + "uptime_seconds": 3765 + }, + "service": { + "description": "DevOps course info service", + "framework": "Flask", + "name": "devops-info-service", + "version": "1.0.0" + }, + "system": { + "architecture": "arm64", + "cpu_count": 11, + "hostname": "MacBook-Pro--Egor.local", + "platform": "Darwin", + "platform_version": "Darwin Kernel Version 25.2.0: Tue Nov 18 21:09:45 PST 2025; root:xnu-12377.61.12~1/RELEASE_ARM64_T6030", + "python_version": "3.12.3" + } +} +``` + +### Endpoint: `GET /health` + +**Description:** Health check endpoint for monitoring. + +**Request:** + +```bash +curl http://localhost:8080/health +``` + +**Response (200 OK):** + +```json +{ + "status": "healthy", + "timestamp": "2026-01-27T10:15:11.908501+00:00", + "uptime_seconds": 3932 +} +``` + +## Testing Evidence + +### Test Commands + +```bash +# Start the service +python app.py + +# Test main endpoint +curl http://localhost:8080/ + +# Test health check +curl http://localhost:8080/health + +# Pretty-printed output +curl -s http://localhost:8080/ | python3 -m json.tool + +# Custom port configuration +PORT=8080 python app.py +``` + +### Screenshots + +The following screenshots demonstrate the working application: + +1. **01-main-endpoint.png** - Main endpoint returning complete JSON with service, system, runtime, and request information +2. **02-health-check.png** - Health check endpoint returning status and uptime +3. 
**03-formatted-output.png** - Pretty-printed JSON output for better readability + +## Challenges & Solutions + +### Challenge 1: Environment Variable Configuration + +**Problem:** Needed to ensure environment variables work correctly across different operating systems. + +**Solution:** Used `os.getenv()` with sensible defaults and proper type conversion for PORT (int) and DEBUG (boolean). + +### Challenge 2: Uptime Formatting + +**Problem:** Raw uptime in seconds is not user-friendly for monitoring. + +**Solution:** Implemented `get_uptime()` function that returns both raw seconds and human-readable format, handling singular/plural forms correctly. + +## GitHub Community + +### Repository Stars + +**Why It Matters:** Starring repositories helps discover quality projects and shows appreciation to maintainers. It also signals to the community which tools are valuable and trustworthy. + +### Following Developers + +**Why It Matters:** Following developers enables continuous learning through observing their work, builds professional network for collaboration, and keeps you updated on industry trends and best practices. + +## Implementation Summary + +This lab successfully implemented a production-ready DevOps info service with: +- Two fully functional REST endpoints +- Comprehensive system introspection +- Environment-based configuration +- Proper error handling and logging +- Clean, maintainable code following Python best practices +- Complete documentation for users and developers \ No newline at end of file diff --git a/app_python/docs/LAB02.md b/app_python/docs/LAB02.md new file mode 100644 index 0000000000..e7ebf16c74 --- /dev/null +++ b/app_python/docs/LAB02.md @@ -0,0 +1,362 @@ +# Lab 02 — Docker Containerization (Python) + +## Docker Best Practices Applied + +### 1. 
Non-Root User + +**Implementation:** +```dockerfile +RUN groupadd -r appuser && useradd -r -g appuser appuser +USER appuser +``` + +**Why It Matters:** Running as root inside containers is a security risk. If an attacker compromises the application, they would have root privileges inside the container, which could lead to container escape or host system compromise. Non-root user limits damage from potential security breaches. + +### 2. Specific Base Image Version + +**Implementation:** +```dockerfile +FROM python:3.12-slim +``` + +**Why It Matters:** Using specific versions (not `latest`) ensures reproducible builds. The `slim` variant is 40% smaller than the full image while including everything needed for Python apps, reducing attack surface and download time. + +### 3. Layer Caching Optimization + +**Implementation:** +```dockerfile +# Copy requirements first +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy code after +COPY app.py . +``` + +**Why It Matters:** Docker caches each layer. Since dependencies change rarely but code changes often, copying requirements first means pip install only runs when dependencies change. This dramatically speeds up rebuilds during development. + +### 4. Minimal File Copying + +**Implementation:** +```dockerfile +COPY requirements.txt . +COPY app.py . +``` + +**Why It Matters:** Only copying necessary files keeps image size small and reduces attack surface. No tests, docs, or development files in production image. + +### 5. .dockerignore File + +**Implementation:** +```dockerignore +venv/ +__pycache__/ +tests/ +docs/ +.git/ +``` + +**Why It Matters:** Excludes unnecessary files from build context, making builds faster and preventing accidental inclusion of sensitive files or large directories. Build context is sent to Docker daemon before build starts. + +### 6. 
No pip Cache + +**Implementation:** +```dockerfile +RUN pip install --no-cache-dir -r requirements.txt +``` + +**Why It Matters:** `--no-cache-dir` prevents pip from storing package cache, reducing image size by 10-20MB without affecting functionality. + +## Image Information & Decisions + +### Base Image: python:3.12-slim + +**Justification:** +- **Version 3.12** matches development environment (Python 3.12.3) +- **slim variant** is significantly smaller than the full image (~195 MB vs ~350 MB+, i.e. roughly 40% smaller) +- Includes everything needed: Python runtime, pip, essential libraries +- More secure than full image (fewer packages = smaller attack surface) + +**Alternatives Considered:** +- `python:3.12-alpine` - smaller but uses musl instead of glibc, can cause compatibility issues +- `python:3.12` - full image ~350MB+ with unnecessary build tools + +### Final Image Size + +**Actual size: 223 MB** (compressed: 48.4 MB) + +**Size Breakdown:** +- Base python:3.12-slim: ~195 MB +- Dependencies (Flask, Werkzeug): ~28 MB +- Application code: <1 MB + +**Assessment:** Standard size for Python containerized application. The slim variant significantly reduces size compared to full Python image (~350 MB+). + +### Layer Structure + +1. Base image (python:3.12-slim) +2. User creation +3. Working directory setup +4. Requirements copy +5. Dependency installation +6. Application code copy +7. Permission changes +8. User switch +9. Environment variables +10. CMD definition + +**Optimization:** Dependencies installed before code copy enables layer caching. + +## Build & Run Process + +### Building the Image + +```bash +cd app_python +docker build -t devops-info-service-python:latest .
+``` + +**Actual Output:** +``` +#1 [internal] load build definition from Dockerfile +#1 transferring dockerfile: 841B done +#1 DONE 0.0s + +#2 [internal] load metadata for docker.io/library/python:3.12-slim +#2 DONE 1.6s + +#3 [internal] load .dockerignore +#3 transferring context: 434B done +#3 DONE 0.0s + +#4 [1/7] FROM docker.io/library/python:3.12-slim@sha256:5e2dbd4bbdd9c0e67412aea9463906f74a22c60f89eb7b5bbb7d45b66a2b68a6 +#4 CACHED + +#5 [2/7] RUN groupadd -r appuser && useradd -r -g appuser appuser +#5 CACHED + +#6 [3/7] WORKDIR /app +#6 CACHED + +#7 [4/7] COPY requirements.txt . +#7 CACHED + +#8 [5/7] RUN pip install --no-cache-dir -r requirements.txt +#8 CACHED + +#9 [6/7] COPY app.py . +#9 CACHED + +#10 [7/7] RUN chown -R appuser:appuser /app +#10 CACHED + +#11 exporting to image +#11 exporting layers done +#11 exporting manifest sha256:a58a958ecd82e446c590402b9cd7392bc0d4bddff26a3c22e80b91aa91055f49 done +#11 naming to docker.io/library/devops-info-service-python:latest done +#11 DONE 0.0s +``` + +**Build Time:** ~4.5 seconds (with cache) + +### Running the Container + +```bash +docker run -p 8080:8080 devops-info-service-python:latest +``` + +**Actual Output:** +``` +2026-02-02 15:12:07,860 - __main__ - INFO - Starting DevOps Info Service on 0.0.0.0:8080 +2026-02-02 15:12:07,860 - __main__ - INFO - Debug mode: False + * Serving Flask app 'app' + * Debug mode: off +WARNING: This is a development server. Do not use it in a production deployment. 
+ * Running on all addresses (0.0.0.0) + * Running on http://127.0.0.1:8080 + * Running on http://172.17.0.2:8080 +Press CTRL+C to quit +``` + +**Container Stats:** +- Memory usage: 39.4 MiB +- CPU usage: ~0.04% +- Startup time: <1 second + +### Testing Endpoints + +```bash +# Main endpoint +curl http://localhost:8080/ + +# Health check +curl http://localhost:8080/health + +# Formatted output +curl -s http://localhost:8080/ | python3 -m json.tool +``` + +**Response from container (main endpoint):** +```json +{ + "service": { + "name": "devops-info-service", + "version": "1.0.0", + "description": "DevOps course info service", + "framework": "Flask" + }, + "system": { + "hostname": "16d981980107", + "platform": "Linux", + "platform_version": "#1 SMP Thu Jan 15 14:58:53 UTC 2026", + "architecture": "aarch64", + "cpu_count": 11, + "python_version": "3.12.12" + }, + "runtime": { + "uptime_seconds": 35, + "uptime_human": "0 minutes", + "current_time": "2026-02-02T15:12:43.167490+00:00", + "timezone": "UTC" + } +} +``` + +### Docker Hub + +**Repository URL:** `https://hub.docker.com/r/aezuraa/devops-info-service` + +**Tagging Strategy:** + +Format: `username/repository:tag` +- `aezuraa` - Docker Hub username (required for push) +- `devops-info-service` - Repository name (consistent across all implementations) +- `python` - Language-specific tag (distinguishes from Go variant) + +**Why This Strategy:** +- **Descriptive tags:** `:python` and `:go` clearly identify implementation language +- **Single repository:** Both variants under one repo simplifies management +- **Semantic versioning ready:** Can add version tags later (e.g., `:python-v1.0`) +- **Production pattern:** Mirrors real-world multi-variant container naming + +**Tag & Push Commands:** +```bash +docker tag devops-info-service-python:latest aezuraa/devops-info-service:python +docker login +docker push aezuraa/devops-info-service:python +``` + +**Push Output:** +``` +The push refers to repository 
[docker.io/aezuraa/devops-info-service] +140d16322bf1: Pushed +d4dcd3efa12a: Pushed +5d39345861c8: Pushed +63cf2d5f63ab: Pushed +d637807aba98: Pushed +06e3a4e15303: Pushed +62f081338475: Pushed +1b3d94f08ecc: Pushed +ec212aae491c: Pushed +9f5ca0a479a5: Pushed +9c4374a520cb: Pushed +python: digest: sha256:d6ddca86964d8b2082edf1f285d5b8e072e7bd70f9ea70cb0133df3c911530c4 size: 856 +``` + +**Pull & Test:** +```bash +docker pull aezuraa/devops-info-service:python +docker run -p 8080:8080 aezuraa/devops-info-service:python +``` + +## Technical Analysis + +### Why This Dockerfile Works + +1. **Base Image Foundation:** Uses official Python slim image which includes Python runtime and pip pre-configured. + +2. **Security Through User Isolation:** Non-root user prevents privilege escalation attacks and follows principle of least privilege. + +3. **Build Performance:** Layer caching means only changed layers rebuild. Code changes don't trigger dependency reinstall. + +4. **Minimal Attack Surface:** Only production files included, no development tools or documentation. + +### What Would Happen If Layer Order Changed? + +**Bad Order (dependencies after code):** +```dockerfile +COPY app.py . +COPY requirements.txt . +RUN pip install -r requirements.txt +``` + +**Problem:** Every code change would invalidate the pip install layer, causing full dependency reinstall on every build. This wastes time and bandwidth. + +**Good Order (current):** +```dockerfile +COPY requirements.txt . +RUN pip install -r requirements.txt +COPY app.py . +``` + +**Benefit:** Code changes don't trigger dependency reinstall. Docker reuses cached pip layer. + +### Security Considerations + +1. **Non-Root Execution:** Limits damage from application vulnerabilities +2. **Specific Version Tags:** Prevents unexpected base image updates +3. **Minimal Dependencies:** Only Flask and Werkzeug, no unnecessary packages +4. **No Secrets in Image:** Environment variables provided at runtime +5. 
**Slim Base Image:** Fewer packages = fewer potential vulnerabilities + +### How .dockerignore Improves Build + +**Without .dockerignore:** +- Docker sends entire directory to daemon (~70MB with venv, docs, tests) +- Slower builds due to large context transfer +- Risk of including sensitive files accidentally + +**With .dockerignore:** +- Only sends necessary files (requirements.txt + app.py = ~5KB) +- Faster builds (especially over network) +- Impossible to accidentally include venv or .git + +**Impact:** Build context reduced dramatically, improving build speed and security. + +## Challenges & Solutions + +### Challenge 1: Permission Errors + +**Problem:** Application tried to write logs but had no permission as non-root user. + +**Solution:** Changed ownership before switching user: +```dockerfile +RUN chown -R appuser:appuser /app +USER appuser +``` + +### Challenge 2: Choosing Base Image + +**Problem:** Multiple Python image variants available - full, slim, alpine. + +**Solution:** Chose `python:3.12-slim` because: +- Smaller than full (130MB vs 350MB+) +- More compatible than alpine (uses glibc, not musl) +- Includes pip and essential libraries + +### Challenge 3: Layer Ordering + +**Problem:** Initial naive ordering caused slow rebuilds on code changes. + +**Solution:** Studied Docker layer caching and placed requirements.txt copy before app code copy. Now code changes don't invalidate dependency layer. + +## Implementation Summary + +Successfully containerized Python DevOps Info Service with: +- Secure non-root execution +- Optimized layer structure for fast rebuilds +- Minimal image size using slim base +- Production-ready Dockerfile following best practices +- Complete .dockerignore for efficient builds + +Image can be pulled from Docker Hub and deployed anywhere Docker runs. 
diff --git a/app_python/docs/LAB03.md b/app_python/docs/LAB03.md new file mode 100644 index 0000000000..07bf56baca --- /dev/null +++ b/app_python/docs/LAB03.md @@ -0,0 +1,156 @@ +# Lab 03 — Continuous Integration + +## 1. Overview + +### Testing Framework: pytest + +**Why pytest:** Simple syntax with powerful fixtures, excellent plugin ecosystem (pytest-cov for coverage), widely used in industry. Less verbose than unittest, supports parameterized tests out of the box. + +### Test Coverage + +All endpoints and helper functions are tested: +- `GET /` — JSON structure, all fields, types, values, custom user agent +- `GET /health` — status, timestamp, uptime, field types +- Error handling — 404 JSON response, 405 method not allowed +- Helper functions — `get_system_info()`, `get_uptime()` + +**Total: 27 tests, 97% coverage** (missing only `__main__` block and 500 error handler logging). + +### CI Workflow Triggers + +```yaml +on: + push: + branches: [master, lab03] + paths: ['app_python/**', '.github/workflows/python-ci.yml'] + pull_request: + branches: [master] + paths: ['app_python/**', '.github/workflows/python-ci.yml'] +``` + +**Why:** Runs on push and PR to master, but ONLY when Python files change (path filters). No unnecessary runs when Go or docs change. + +### Versioning Strategy: CalVer + +**Format:** `YYYY.MM.DD` (e.g., `2026.02.02`) + +**Why CalVer over SemVer:** This is a service (not a library). CalVer clearly shows when image was built. No need to track breaking changes — API is internal. Date-based versioning is simpler for continuous deployment. + +**Docker tags per build:** +- `aezuraa/devops-info-service:python` — latest stable +- `aezuraa/devops-info-service:python-2026.02.02` — CalVer date +- `aezuraa/devops-info-service:python-abc1234` — commit SHA for traceability + +## 2. 
Workflow Evidence + +- **Workflow file:** `.github/workflows/python-ci.yml` +- **Successful workflow run (Python CI):** https://github.com/AEZuraa/DevOps-Core-Course/actions/runs/21912674725 +- **Docker Hub:** https://hub.docker.com/r/aezuraa/devops-info-service +- **Status badge:** Added to `app_python/README.md` + +### Tests Passing Locally + +``` +============================= test session starts ============================== +platform darwin -- Python 3.12.3, pytest-8.3.4, pluggy-1.6.0 +collected 27 items + +tests/test_app.py::TestMainEndpoint::test_status_code PASSED [ 3%] +tests/test_app.py::TestMainEndpoint::test_content_type PASSED [ 7%] +tests/test_app.py::TestMainEndpoint::test_service_fields PASSED [ 11%] +tests/test_app.py::TestMainEndpoint::test_system_fields PASSED [ 14%] +tests/test_app.py::TestMainEndpoint::test_system_field_types PASSED [ 18%] +tests/test_app.py::TestMainEndpoint::test_runtime_fields PASSED [ 22%] +tests/test_app.py::TestMainEndpoint::test_runtime_field_types PASSED [ 25%] +tests/test_app.py::TestMainEndpoint::test_request_fields PASSED [ 29%] +tests/test_app.py::TestMainEndpoint::test_endpoints_list PASSED [ 33%] +tests/test_app.py::TestMainEndpoint::test_all_top_level_keys PASSED [ 37%] +tests/test_app.py::TestMainEndpoint::test_custom_user_agent PASSED [ 40%] +tests/test_app.py::TestHealthEndpoint::test_status_code PASSED [ 44%] +tests/test_app.py::TestHealthEndpoint::test_content_type PASSED [ 48%] +tests/test_app.py::TestHealthEndpoint::test_health_status PASSED [ 51%] +tests/test_app.py::TestHealthEndpoint::test_health_fields PASSED [ 55%] +tests/test_app.py::TestHealthEndpoint::test_health_field_types PASSED [ 59%] +tests/test_app.py::TestHealthEndpoint::test_health_all_keys PASSED [ 62%] +tests/test_app.py::TestErrorHandling::test_404_unknown_endpoint PASSED [ 66%] +tests/test_app.py::TestErrorHandling::test_404_json_response PASSED [ 70%] +tests/test_app.py::TestErrorHandling::test_404_content_type PASSED [ 74%] 
+tests/test_app.py::TestErrorHandling::test_post_method_not_allowed PASSED [ 77%] +tests/test_app.py::TestErrorHandling::test_put_method_not_allowed PASSED [ 81%] +tests/test_app.py::TestHelperFunctions::test_get_system_info_returns_dict PASSED [ 85%] +tests/test_app.py::TestHelperFunctions::test_get_system_info_keys PASSED [ 88%] +tests/test_app.py::TestHelperFunctions::test_get_uptime_returns_dict PASSED [ 92%] +tests/test_app.py::TestHelperFunctions::test_get_uptime_non_negative PASSED [ 96%] +tests/test_app.py::TestHelperFunctions::test_get_uptime_human_readable PASSED [100%] + +============================== 27 passed in 0.33s ============================== +``` + +### Coverage Report + +``` +Name Stmts Miss Cover Missing +------------------------------------------------- +app.py 47 6 87% 51, 132-133, 140-142 +tests/__init__.py 0 0 100% +tests/test_app.py 155 0 100% +------------------------------------------------- +TOTAL 202 6 97% +``` + +**Not covered:** `if __name__ == '__main__'` block (lines 140-142) and 500 error handler logging (lines 132-133). These are runtime-only paths that don't affect test reliability. + +## 3. Best Practices Implemented + +1. **Job Dependencies:** Docker build only runs if lint+tests pass (`needs: lint-and-test`). No broken images pushed. + +2. **Dependency Caching:** `actions/setup-python` with `cache: 'pip'` caches pip packages based on `requirements-dev.txt` hash. Expected speedup: ~30-60s saved on dependency install (cache hit skips download entirely). + +3. **Snyk Security Scanning:** Scans dependencies for CVEs with `severity-threshold=high`. Uses `continue-on-error: true` — warns on vulnerabilities without blocking the pipeline. Only high/critical severity breaks the build. Flask 3.1.0 and Werkzeug 3.1.3 have no known high-severity vulnerabilities. + +4. **Workflow Concurrency:** `cancel-in-progress: true` cancels outdated runs on same branch. Saves CI minutes on rapid pushes. + +5. 
**Conditional Docker Push:** Docker step only runs on `push` events (`if: github.event_name == 'push'`). PRs only run tests, not push images. + +6. **Docker Layer Caching:** Uses GitHub Actions cache (`cache-from: type=gha`) for Docker BuildKit layers. Speeds up subsequent builds significantly (~60-80% faster). + +7. **Environment Variables:** Repeated values (`DOCKER_IMAGE`, `PYTHON_VERSION`) defined once in `env:` block. DRY principle. + +8. **Status Badge:** CI status visible in README without navigating to Actions tab. + +9. **Path Filters:** Workflows only trigger on relevant file changes. Python CI ignores Go changes and vice versa. + +10. **Coverage Threshold:** `--cov-fail-under=70` fails CI if coverage drops below 70%. + +### Caching Performance + +| Metric | Without Cache | With Cache | +|--------|--------------|------------| +| pip install | ~15-20s | ~2-3s | +| Docker build | ~30-45s | ~5-10s | +| **Total saved** | — | **~40-50s per run** | + +Cache key is based on `requirements-dev.txt` hash — changes to dependencies invalidate cache automatically. + +### Snyk Integration + +- **Severity threshold:** `high` — only high and critical vulnerabilities fail the build +- **Current status:** No high-severity vulnerabilities found in Flask 3.1.0 / Werkzeug 3.1.3 +- **`continue-on-error: true`** — advisory issues don't block development +- **Requires:** `SNYK_TOKEN` secret in GitHub repo settings + +## 4. Key Decisions + +**Versioning Strategy:** CalVer (`YYYY.MM.DD`). This is a service deployed continuously, not a library with breaking changes. Date-based tags make it obvious when an image was built, and commit SHA tags allow exact traceability. + +**Docker Tags:** Three tags per build — `:python` (rolling latest), `:python-2026.02.02` (CalVer), `:python-abc1234` (commit SHA). This allows pulling latest, pinning to a date, or pinning to exact commit. + +**Workflow Triggers:** Push to master/lab03 + PRs to master, with path filters. 
PRs run tests only (no Docker push). Push triggers full pipeline including Docker Hub push. + +**Test Coverage:** 97% coverage. Untested code is `__main__` block and 500 error logging — both are runtime-only and testing them would require mocking the server startup, which adds complexity without value. + +## 5. Challenges + +- **System Python conflict:** Global web3/eth_typing package interfered with pytest. Solved by using virtual environment for local testing. +- **Trailing whitespace:** flake8 flagged whitespace in blank lines. Fixed before committing. +- **Snyk token:** Requires manual setup in GitHub Secrets. Set to `continue-on-error` so CI works even without token configured initially. diff --git a/app_python/docs/screenshots/01-main-endpoint.png b/app_python/docs/screenshots/01-main-endpoint.png new file mode 100644 index 0000000000..d239b91627 Binary files /dev/null and b/app_python/docs/screenshots/01-main-endpoint.png differ diff --git a/app_python/docs/screenshots/02-health-check.png b/app_python/docs/screenshots/02-health-check.png new file mode 100644 index 0000000000..f4c80e23c6 Binary files /dev/null and b/app_python/docs/screenshots/02-health-check.png differ diff --git a/app_python/docs/screenshots/03.formatted-output.png b/app_python/docs/screenshots/03.formatted-output.png new file mode 100644 index 0000000000..1e7fb3762d Binary files /dev/null and b/app_python/docs/screenshots/03.formatted-output.png differ diff --git a/app_python/requirements-dev.txt b/app_python/requirements-dev.txt new file mode 100644 index 0000000000..c1cc3c195e --- /dev/null +++ b/app_python/requirements-dev.txt @@ -0,0 +1,9 @@ +# Development & testing dependencies +-r requirements.txt + +# Testing +pytest==8.3.4 +pytest-cov==6.0.0 + +# Linting +flake8==7.1.1 diff --git a/app_python/requirements.txt b/app_python/requirements.txt new file mode 100644 index 0000000000..ec9dd912c2 --- /dev/null +++ b/app_python/requirements.txt @@ -0,0 +1,6 @@ +# Web Framework +Flask==3.1.0 
+Werkzeug==3.1.3 + +# Metrics +prometheus-client==0.23.1 diff --git a/app_python/tests/__init__.py b/app_python/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/app_python/tests/test_app.py b/app_python/tests/test_app.py new file mode 100644 index 0000000000..57dc147cfa --- /dev/null +++ b/app_python/tests/test_app.py @@ -0,0 +1,297 @@ +"""Unit tests for DevOps Info Service.""" +import json +import pytest +import app as app_module +from app import app, get_system_info, get_uptime + + +@pytest.fixture(autouse=True) +def isolate_visits(tmp_path): + """Redirect visits file to a temp directory for every test.""" + tmp_file = str(tmp_path / "visits") + original = app_module.VISITS_FILE + app_module.VISITS_FILE = tmp_file + yield + app_module.VISITS_FILE = original + + +@pytest.fixture +def client(): + """Create test client for Flask app.""" + app.config['TESTING'] = True + with app.test_client() as client: + yield client + + +# --- GET / endpoint tests --- + +class TestMainEndpoint: + """Tests for GET / endpoint.""" + + def test_status_code(self, client): + """GET / returns 200 OK.""" + response = client.get('/') + assert response.status_code == 200 + + def test_content_type(self, client): + """GET / returns JSON content type.""" + response = client.get('/') + assert response.content_type == 'application/json' + + def test_service_fields(self, client): + """GET / response contains all required service fields.""" + response = client.get('/') + data = json.loads(response.data) + + assert 'service' in data + service = data['service'] + assert service['name'] == 'devops-info-service' + assert service['version'] == '1.0.0' + assert service['description'] == 'DevOps course info service' + assert service['framework'] == 'Flask' + + def test_system_fields(self, client): + """GET / response contains all required system fields.""" + response = client.get('/') + data = json.loads(response.data) + + assert 'system' in data + system = data['system'] + 
assert 'hostname' in system + assert 'platform' in system + assert 'platform_version' in system + assert 'architecture' in system + assert 'cpu_count' in system + assert 'python_version' in system + + def test_system_field_types(self, client): + """System fields have correct types.""" + response = client.get('/') + data = json.loads(response.data) + system = data['system'] + + assert isinstance(system['hostname'], str) + assert isinstance(system['platform'], str) + assert isinstance(system['cpu_count'], int) + assert system['cpu_count'] > 0 + + def test_runtime_fields(self, client): + """GET / response contains all required runtime fields.""" + response = client.get('/') + data = json.loads(response.data) + + assert 'runtime' in data + runtime = data['runtime'] + assert 'uptime_seconds' in runtime + assert 'uptime_human' in runtime + assert 'current_time' in runtime + assert runtime['timezone'] == 'UTC' + + def test_runtime_field_types(self, client): + """Runtime fields have correct types.""" + response = client.get('/') + data = json.loads(response.data) + runtime = data['runtime'] + + assert isinstance(runtime['uptime_seconds'], int) + assert runtime['uptime_seconds'] >= 0 + assert isinstance(runtime['uptime_human'], str) + assert isinstance(runtime['current_time'], str) + + def test_request_fields(self, client): + """GET / response contains request metadata.""" + response = client.get('/') + data = json.loads(response.data) + + assert 'request' in data + req = data['request'] + assert req['method'] == 'GET' + assert req['path'] == '/' + assert 'client_ip' in req + assert 'user_agent' in req + + def test_endpoints_list(self, client): + """GET / response contains endpoints list.""" + response = client.get('/') + data = json.loads(response.data) + + assert 'endpoints' in data + endpoints = data['endpoints'] + assert isinstance(endpoints, list) + assert len(endpoints) == 4 + + paths = [e['path'] for e in endpoints] + assert '/' in paths + assert '/health' in paths + 
assert '/visits' in paths + assert '/metrics' in paths + + def test_all_top_level_keys(self, client): + """GET / response has exactly the expected top-level keys.""" + response = client.get('/') + data = json.loads(response.data) + + expected_keys = {'service', 'system', 'runtime', 'request', 'endpoints', 'visits'} + assert set(data.keys()) == expected_keys + + def test_custom_user_agent(self, client): + """GET / captures custom user agent.""" + response = client.get('/', headers={'User-Agent': 'TestBot/1.0'}) + data = json.loads(response.data) + assert data['request']['user_agent'] == 'TestBot/1.0' + + +# --- GET /health endpoint tests --- + +class TestHealthEndpoint: + """Tests for GET /health endpoint.""" + + def test_status_code(self, client): + """GET /health returns 200 OK.""" + response = client.get('/health') + assert response.status_code == 200 + + def test_content_type(self, client): + """GET /health returns JSON content type.""" + response = client.get('/health') + assert response.content_type == 'application/json' + + def test_health_status(self, client): + """GET /health returns 'healthy' status.""" + response = client.get('/health') + data = json.loads(response.data) + assert data['status'] == 'healthy' + + def test_health_fields(self, client): + """GET /health contains all required fields.""" + response = client.get('/health') + data = json.loads(response.data) + + assert 'status' in data + assert 'timestamp' in data + assert 'uptime_seconds' in data + + def test_health_field_types(self, client): + """Health fields have correct types.""" + response = client.get('/health') + data = json.loads(response.data) + + assert isinstance(data['status'], str) + assert isinstance(data['timestamp'], str) + assert isinstance(data['uptime_seconds'], int) + assert data['uptime_seconds'] >= 0 + + def test_health_all_keys(self, client): + """GET /health has exactly the expected keys.""" + response = client.get('/health') + data = json.loads(response.data) + + 
expected_keys = {'status', 'timestamp', 'uptime_seconds'} + assert set(data.keys()) == expected_keys + + +# --- Error handling tests --- + +class TestErrorHandling: + """Tests for error handling.""" + + def test_404_unknown_endpoint(self, client): + """Unknown endpoints return 404.""" + response = client.get('/nonexistent') + assert response.status_code == 404 + + def test_404_json_response(self, client): + """404 response is JSON with error fields.""" + response = client.get('/nonexistent') + data = json.loads(response.data) + + assert 'error' in data + assert data['error'] == 'Not Found' + assert 'message' in data + + def test_404_content_type(self, client): + """404 response has JSON content type.""" + response = client.get('/nonexistent') + assert response.content_type == 'application/json' + + def test_post_method_not_allowed(self, client): + """POST to / returns 405 Method Not Allowed.""" + response = client.post('/') + assert response.status_code == 405 + + def test_put_method_not_allowed(self, client): + """PUT to / returns 405 Method Not Allowed.""" + response = client.put('/') + assert response.status_code == 405 + + +# --- Helper function tests --- + +class TestHelperFunctions: + """Tests for helper functions.""" + + def test_get_system_info_returns_dict(self): + """get_system_info returns a dictionary.""" + info = get_system_info() + assert isinstance(info, dict) + + def test_get_system_info_keys(self): + """get_system_info has all required keys.""" + info = get_system_info() + required_keys = {'hostname', 'platform', 'platform_version', + 'architecture', 'cpu_count', 'python_version'} + assert required_keys.issubset(set(info.keys())) + + def test_get_uptime_returns_dict(self): + """get_uptime returns a dictionary.""" + uptime = get_uptime() + assert isinstance(uptime, dict) + assert 'seconds' in uptime + assert 'human' in uptime + + def test_get_uptime_non_negative(self): + """Uptime seconds is non-negative.""" + uptime = get_uptime() + assert 
uptime['seconds'] >= 0 + +    def test_get_uptime_human_readable(self): +        """Uptime human string contains 'minute'.""" +        uptime = get_uptime() +        assert 'minute' in uptime['human'] + + +class TestMetricsEndpoint: +    """Tests for GET /metrics endpoint.""" + +    def test_metrics_status_code(self, client): +        """GET /metrics returns 200.""" +        response = client.get('/metrics') +        assert response.status_code == 200 + +    def test_metrics_content_type(self, client): +        """GET /metrics returns Prometheus text format.""" +        response = client.get('/metrics') +        assert 'text/plain' in response.content_type + +    def test_metrics_contains_http_requests_total(self, client): +        """Metrics output includes http_requests_total counter.""" +        client.get('/') +        response = client.get('/metrics') +        assert b'http_requests_total' in response.data + +    def test_metrics_contains_histogram(self, client): +        """Metrics output includes http_request_duration_seconds histogram.""" +        client.get('/') +        response = client.get('/metrics') +        assert b'http_request_duration_seconds' in response.data + +    def test_metrics_contains_gauge(self, client): +        """Metrics output includes http_requests_in_progress gauge.""" +        response = client.get('/metrics') +        assert b'http_requests_in_progress' in response.data + +    def test_metrics_contains_business_counter(self, client): +        """Metrics output includes business-level endpoint call counter.""" +        client.get('/') +        response = client.get('/metrics') +        assert b'devops_info_endpoint_calls_total' in response.data diff --git a/docs/LAB04.md b/docs/LAB04.md new file mode 100644 index 0000000000..cf49aa26f1 --- /dev/null +++ b/docs/LAB04.md @@ -0,0 +1,331 @@ +# Lab 04 — Infrastructure as Code (Terraform & Pulumi) + +## 1. Cloud Provider & Infrastructure + +**Provider:** Yandex Cloud — free tier, accessible in Russia, native Terraform/Pulumi support. + +**Instance:** standard-v2, 2 vCPU (20%), 1 GB RAM, 10 GB HDD, Ubuntu 24.04 LTS.
+ +**Region:** ru-central1-a. + +**Cost:** $0 (free tier grant). + +**Resources created:** + +| Resource | Name | Purpose | +|----------|------|---------| +| VPC Network | lab04-network | Isolated network | +| Subnet | lab04-subnet | 10.0.1.0/24 | +| Security Group | lab04-sg | SSH(22), HTTP(80), App(5000) | +| Compute Instance | lab04-vm | Main VM | + +## 2. Terraform Implementation + +**Version:** Terraform 1.5.7, Yandex Cloud provider v0.187.0. + +**Project structure:** + +``` +terraform/ +├── main.tf # Provider + all resources +├── variables.tf # Input variables with defaults +├── outputs.tf # VM IP, SSH command, IDs +├── terraform.tfvars # Actual secrets (gitignored) +├── terraform.tfvars.example +├── .tflint.hcl # Linter config +├── .gitignore +└── README.md +``` + +**Key decisions:** +- Service account key auth instead of OAuth token — more reliable for automation +- Variables with empty defaults — allows CI validation without credentials +- SSH public key passed as variable content — CI-friendly +- All resources labeled with `project = "devops-lab04"` +- YC provider mirror (`terraform-mirror.yandexcloud.net`) — registry.terraform.io inaccessible from Russia + +**Challenges:** +- `registry.terraform.io` not accessible — resolved with `~/.terraformrc` network mirror +- "Permission denied to resource-manager.folder" — billing account was not linked; resolved after linking +- Security group creation initially failed before billing activation + +
+terraform init + +``` +Initializing the backend... +Initializing provider plugins... +- Finding latest version of yandex-cloud/yandex... +- Installing yandex-cloud/yandex v0.187.0 (unauthenticated) +Terraform has been successfully initialized! +``` + +
+ +
+terraform plan + +``` +Plan: 4 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + security_group_id = (known after apply) + + ssh_command = (known after apply) + + subnet_id = (known after apply) + + vm_name = "lab04-vm" + + vm_public_ip = (known after apply) +``` + +
+ +
+terraform apply + +``` +yandex_vpc_network.lab04: Creating... +yandex_vpc_network.lab04: Creation complete after 3s +yandex_vpc_subnet.lab04: Creating... +yandex_vpc_security_group.lab04: Creating... +yandex_vpc_subnet.lab04: Creation complete after 0s +yandex_vpc_security_group.lab04: Creation complete after 1s +yandex_compute_instance.lab04: Creating... +yandex_compute_instance.lab04: Creation complete after 40s + +Apply complete! Resources: 4 added, 0 changed, 0 destroyed. + +Outputs: + security_group_id = "enp8ughv6u025h126jdu" + ssh_command = "ssh ubuntu@93.77.184.123" + subnet_id = "e9bj9tsdlk5l00cvbivl" + vm_name = "lab04-vm" + vm_public_ip = "93.77.184.123" +``` + +
+ +
+SSH connection proof + +``` +$ ssh ubuntu@93.77.184.123 "hostname && uname -a" +fhmvh5a6rb2v0hkooftk +Linux fhmvh5a6rb2v0hkooftk 6.8.0-100-generic #100-Ubuntu SMP PREEMPT_DYNAMIC Tue Jan 13 16:40:06 UTC 2026 x86_64 x86_64 x86_64 GNU/Linux +``` + +
+ +## 3. Pulumi Implementation + +**Version:** Pulumi 3.220.0, Python 3.11, pulumi-yandex 0.13.0. + +**How code differs from Terraform:** +- Python instead of HCL — real programming language with full IDE support +- Resources are Python objects with typed arguments +- Configuration via `pulumi config` instead of `.tfvars` +- Secrets encrypted by default with passphrase +- Outputs via `pulumi.export()` instead of `output` blocks +- Local backend with `pulumi login --local` + +**Advantages discovered:** +- Autocomplete and type checking in IDE +- Familiar Python syntax +- Better error messages with Python stack traces +- Native loops, functions, conditionals + +**Challenges:** +- `pulumi-yandex` 0.13.0 uses deprecated `pkg_resources` — resolved by pinning `setuptools<70` +- API differences: `ingresses`/`egresses` instead of `ingress`/`egress` (plural form) +- Pulumi installs its own venv — need to manage dependencies through `pulumi install` + +
+terraform destroy (cleanup before Pulumi) + +``` +yandex_compute_instance.lab04: Destroying... +yandex_compute_instance.lab04: Destruction complete after 33s +yandex_vpc_subnet.lab04: Destroying... +yandex_vpc_security_group.lab04: Destroying... +yandex_vpc_security_group.lab04: Destruction complete after 1s +yandex_vpc_subnet.lab04: Destruction complete after 2s +yandex_vpc_network.lab04: Destroying... +yandex_vpc_network.lab04: Destruction complete after 1s + +Destroy complete! Resources: 4 destroyed. +``` + +
+ +
+pulumi preview + +``` +Previewing update (dev): + + pulumi:pulumi:Stack lab04-infra-dev create + + yandex:index:VpcNetwork lab04-network create + + yandex:index:VpcSubnet lab04-subnet create + + yandex:index:VpcSecurityGroup lab04-sg create + + yandex:index:ComputeInstance lab04-vm create + +Resources: + + 5 to create +``` + +
+ +
+pulumi up + +``` +Updating (dev): + + yandex:index:VpcNetwork lab04-network created (2s) + + yandex:index:VpcSubnet lab04-subnet created (0.37s) + + yandex:index:VpcSecurityGroup lab04-sg created (1s) + + yandex:index:ComputeInstance lab04-vm created (43s) + + pulumi:pulumi:Stack lab04-infra-dev created (47s) + +Outputs: + ssh_command : "ssh ubuntu@89.169.134.171" + vm_name : "lab04-vm" + vm_public_ip: "89.169.134.171" + +Resources: + + 5 created +Duration: 48s +``` + +
+ +
+SSH connection proof + +``` +$ ssh ubuntu@89.169.134.171 "hostname && uname -a" +fhmps7t0s3qa1rih6vot +Linux fhmps7t0s3qa1rih6vot 6.8.0-100-generic #100-Ubuntu SMP PREEMPT_DYNAMIC Tue Jan 13 16:40:06 UTC 2026 x86_64 x86_64 x86_64 GNU/Linux +``` + +
+ +## 4. Terraform vs Pulumi Comparison + +**Ease of Learning:** Terraform is simpler to start with — HCL is concise and docs are extensive. Pulumi requires programming knowledge but feels more natural for developers. + +**Code Readability:** HCL is more declarative and easy to scan. Python code is more verbose but benefits from IDE autocomplete and type checking. + +**Debugging:** Pulumi has better error messages (Python tracebacks with line numbers). Terraform errors can be cryptic, especially with complex expressions. + +**Documentation:** Terraform has a larger community and more examples. The `pulumi-yandex` package is poorly maintained (deprecated deps, sparse docs). Terraform YC provider is much more mature. + +**Use Case:** Terraform for ops/SRE teams and simple infra. Pulumi when infra logic is complex or the team is developer-heavy. For Yandex Cloud specifically, Terraform is the clear winner due to provider maturity. + +## 5. Lab 5 Preparation & Cleanup + +**VM for Lab 5:** Yes, keeping Pulumi-created VM (`89.169.134.171`). + +**Cleanup status:** +- Terraform resources: fully destroyed (`terraform destroy` — 4 resources) +- Pulumi VM: kept running for Lab 5 +- Cloud console verified — only Pulumi resources remain + +## Bonus: IaC CI/CD + +**Workflow:** `.github/workflows/terraform-ci.yml` + +Runs on PRs/pushes affecting `terraform/**`: +1. `terraform fmt -check` — formatting +2. `terraform init -backend=false` — provider download +3. `terraform validate` — syntax check +4. `tflint` — best practice linting + +Path filters ensure workflow only triggers on Terraform changes. + +## Bonus: GitHub Repository Import + +**Purpose:** Managing existing resources with Terraform demonstrates brownfield IaC adoption — bringing manually created infrastructure under IaC management. 
+ +**Why importing matters:** +- **Version control:** All configuration changes tracked in Git +- **Code review:** PR-based review prevents unauthorized modifications +- **Prevents drift:** Terraform detects manual changes +- **Living documentation:** Code reflects actual infrastructure state +- **Disaster recovery:** Recreate infrastructure from code +- **Team collaboration:** No conflicting manual changes + +**Real-world use case:** Organizations with 100s of manually created resources gradually import them into Terraform to gain full IaC benefits without disrupting existing services. + +--- + +### Import Process + +**1. Setup:** +```bash +cd terraform/github +terraform init +export GITHUB_TOKEN="your-personal-access-token" +export TF_VAR_github_owner="AEZuraa" +``` + +**2. Import existing repository:** +```bash +terraform import github_repository.course_repo DevOps-Core-Course +``` + +**Output:** +``` +github_repository.course_repo: Importing from ID "DevOps-Core-Course"... +github_repository.course_repo: Import prepared! + Prepared github_repository for import +github_repository.course_repo: Refreshing state... [id=DevOps-Core-Course] + +Import successful! + +Resources: + 1 imported + 0 added + 0 changed + 0 destroyed +``` + +**3. Check for drift:** +```bash +terraform plan +``` + +The first `terraform plan` after import showed **configuration drift** — differences between Terraform config and actual GitHub state: + +- `has_issues`: config had `true`, reality was `false` +- `has_wiki`: config had `false`, reality was `true` +- `has_projects`: config had `false`, reality was `true` +- Merge settings didn't match +- Description was much longer in reality + +**4. Fix drift by updating config to match reality:** + +Updated `main.tf` to reflect actual GitHub settings (description, has_issues, has_wiki, has_projects, merge settings). + +**5. Verify no changes needed:** +```bash +terraform plan +``` + +Expected output: +``` +No changes. 
Your infrastructure matches the configuration. +``` + +**Result:** Repository is now under Terraform management. Any future changes must go through code, enabling PR reviews, CI validation, and audit trails. + +--- + +### Benefits Observed + +**Before import:** Repository settings could be changed manually by anyone with access, no audit trail. + +**After import:** All changes must be: +1. Written in code (`main.tf`) +2. Reviewed via PR +3. Validated by CI/CD +4. Applied through Terraform +5. Tracked in Git history + +This prevents accidental misconfiguration and provides full visibility into infrastructure changes. diff --git a/edge-api/.editorconfig b/edge-api/.editorconfig new file mode 100644 index 0000000000..a727df347a --- /dev/null +++ b/edge-api/.editorconfig @@ -0,0 +1,12 @@ +# http://editorconfig.org +root = true + +[*] +indent_style = tab +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.yml] +indent_style = space diff --git a/edge-api/.gitignore b/edge-api/.gitignore new file mode 100644 index 0000000000..4138168d75 --- /dev/null +++ b/edge-api/.gitignore @@ -0,0 +1,167 @@ +# Logs + +logs +_.log +npm-debug.log_ +yarn-debug.log* +yarn-error.log* +lerna-debug.log* +.pnpm-debug.log* + +# Diagnostic reports (https://nodejs.org/api/report.html) + +report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json + +# Runtime data + +pids +_.pid +_.seed +\*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover + +lib-cov + +# Coverage directory used by tools like istanbul + +coverage +\*.lcov + +# nyc test coverage + +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) + +.grunt + +# Bower dependency directory (https://bower.io/) + +bower_components + +# node-waf configuration + +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) + +build/Release + +# Dependency directories + +node_modules/ +jspm_packages/ + +# Snowpack dependency 
directory (https://snowpack.dev/) + +web_modules/ + +# TypeScript cache + +\*.tsbuildinfo + +# Optional npm cache directory + +.npm + +# Optional eslint cache + +.eslintcache + +# Optional stylelint cache + +.stylelintcache + +# Microbundle cache + +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history + +.node_repl_history + +# Output of 'npm pack' + +\*.tgz + +# Yarn Integrity file + +.yarn-integrity + +# parcel-bundler cache (https://parceljs.org/) + +.cache +.parcel-cache + +# Next.js build output + +.next +out + +# Nuxt.js build / generate output + +.nuxt +dist + +# Gatsby files + +.cache/ + +# Comment in the public line in if your project uses Gatsby and not Next.js + +# https://nextjs.org/blog/next-9-1#public-directory-support + +# public + +# vuepress build output + +.vuepress/dist + +# vuepress v2.x temp and cache directory + +.temp +.cache + +# Docusaurus cache and generated files + +.docusaurus + +# Serverless directories + +.serverless/ + +# FuseBox cache + +.fusebox/ + +# DynamoDB Local files + +.dynamodb/ + +# TernJS port file + +.tern-port + +# Stores VSCode versions used for testing VSCode extensions + +.vscode-test + +# yarn v2 + +.yarn/cache +.yarn/unplugged +.yarn/build-state.yml +.yarn/install-state.gz +.pnp.\* + +# wrangler project + +.dev.vars* +!.dev.vars.example +.env* +!.env.example +.wrangler/ diff --git a/edge-api/.prettierrc b/edge-api/.prettierrc new file mode 100644 index 0000000000..5c7b5d3c7a --- /dev/null +++ b/edge-api/.prettierrc @@ -0,0 +1,6 @@ +{ + "printWidth": 140, + "singleQuote": true, + "semi": true, + "useTabs": true +} diff --git a/edge-api/.vscode/settings.json b/edge-api/.vscode/settings.json new file mode 100644 index 0000000000..0126e59b82 --- /dev/null +++ b/edge-api/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "files.associations": { + "wrangler.json": "jsonc" + } +} \ No newline at end of file diff --git a/edge-api/AGENTS.md b/edge-api/AGENTS.md new file mode 100644 index 
0000000000..340506a599 --- /dev/null +++ b/edge-api/AGENTS.md @@ -0,0 +1,41 @@ +# Cloudflare Workers + +STOP. Your knowledge of Cloudflare Workers APIs and limits may be outdated. Always retrieve current documentation before any Workers, KV, R2, D1, Durable Objects, Queues, Vectorize, AI, or Agents SDK task. + +## Docs + +- https://developers.cloudflare.com/workers/ +- MCP: `https://docs.mcp.cloudflare.com/mcp` + +For all limits and quotas, retrieve from the product's `/platform/limits/` page. eg. `/workers/platform/limits` + +## Commands + +| Command | Purpose | +|---------|---------| +| `npx wrangler dev` | Local development | +| `npx wrangler deploy` | Deploy to Cloudflare | +| `npx wrangler types` | Generate TypeScript types | + +Run `wrangler types` after changing bindings in wrangler.jsonc. + +## Node.js Compatibility + +https://developers.cloudflare.com/workers/runtime-apis/nodejs/ + +## Errors + +- **Error 1102** (CPU/Memory exceeded): Retrieve limits from `/workers/platform/limits/` +- **All errors**: https://developers.cloudflare.com/workers/observability/errors/ + +## Product Docs + +Retrieve API references and limits from: +`/kv/` · `/r2/` · `/d1/` · `/durable-objects/` · `/queues/` · `/vectorize/` · `/workers-ai/` · `/agents/` + +## Best Practices (conditional) + +If the application uses Durable Objects or Workflows, refer to the relevant best practices: + +- Durable Objects: https://developers.cloudflare.com/durable-objects/best-practices/rules-of-durable-objects/ +- Workflows: https://developers.cloudflare.com/workflows/build/rules-of-workflows/ diff --git a/edge-api/package-lock.json b/edge-api/package-lock.json new file mode 100644 index 0000000000..7adee25cf0 --- /dev/null +++ b/edge-api/package-lock.json @@ -0,0 +1,2913 @@ +{ + "name": "edge-api", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "edge-api", + "version": "0.0.0", + "devDependencies": { + "@cloudflare/vitest-pool-workers": 
"^0.12.4", + "@types/node": "^25.6.2", + "typescript": "^5.5.2", + "vitest": "~3.2.0", + "wrangler": "^4.90.0" + } + }, + "node_modules/@cloudflare/kv-asset-handler": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/@cloudflare/kv-asset-handler/-/kv-asset-handler-0.5.0.tgz", + "integrity": "sha512-jxQYkj8dSIzc0cD6cMMNdOc1UVjqSqu8BZdor5s8cGjW2I8BjODt/kWPVdY+u9zj3ms75Q5qaZgnxUad83+eAg==", + "dev": true, + "license": "MIT OR Apache-2.0", + "engines": { + "node": ">=22.0.0" + } + }, + "node_modules/@cloudflare/unenv-preset": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/@cloudflare/unenv-preset/-/unenv-preset-2.16.1.tgz", + "integrity": "sha512-ECxObrMfyTl5bhQf/lZCXwo5G6xX9IAUo+nDMKK4SZ8m4Jvvxp52vilxyySSWh2YTZz8+HQ07qGH/2rEom1vDw==", + "dev": true, + "license": "MIT OR Apache-2.0", + "peerDependencies": { + "unenv": "2.0.0-rc.24", + "workerd": ">1.20260305.0 <2.0.0-0" + }, + "peerDependenciesMeta": { + "workerd": { + "optional": true + } + } + }, + "node_modules/@cloudflare/vitest-pool-workers": { + "version": "0.12.21", + "resolved": "https://registry.npmjs.org/@cloudflare/vitest-pool-workers/-/vitest-pool-workers-0.12.21.tgz", + "integrity": "sha512-xqvqVR+qAhekXWaTNY36UtFFmHrz13yGUoWVGOu6LDC2ABiQqI1E1lQ3eUZY8KVB+1FXY/mP5dB6oD07XUGnPg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cjs-module-lexer": "^1.2.3", + "esbuild": "0.27.3", + "miniflare": "4.20260310.0", + "wrangler": "4.72.0" + }, + "peerDependencies": { + "@vitest/runner": "2.0.x - 3.2.x", + "@vitest/snapshot": "2.0.x - 3.2.x", + "vitest": "2.0.x - 3.2.x" + } + }, + "node_modules/@cloudflare/vitest-pool-workers/node_modules/@cloudflare/kv-asset-handler": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@cloudflare/kv-asset-handler/-/kv-asset-handler-0.4.2.tgz", + "integrity": "sha512-SIOD2DxrRRwQ+jgzlXCqoEFiKOFqaPjhnNTGKXSRLvp1HiOvapLaFG2kEr9dYQTYe8rKrd9uvDUzmAITeNyaHQ==", + "dev": true, + "license": "MIT OR Apache-2.0", + "engines": { + 
"node": ">=18.0.0" + } + }, + "node_modules/@cloudflare/vitest-pool-workers/node_modules/@cloudflare/unenv-preset": { + "version": "2.15.0", + "resolved": "https://registry.npmjs.org/@cloudflare/unenv-preset/-/unenv-preset-2.15.0.tgz", + "integrity": "sha512-EGYmJaGZKWl+X8tXxcnx4v2bOZSjQeNI5dWFeXivgX9+YCT69AkzHHwlNbVpqtEUTbew8eQurpyOpeN8fg00nw==", + "dev": true, + "license": "MIT OR Apache-2.0", + "peerDependencies": { + "unenv": "2.0.0-rc.24", + "workerd": "1.20260301.1 || ~1.20260302.1 || ~1.20260303.1 || ~1.20260304.1 || >1.20260305.0 <2.0.0-0" + }, + "peerDependenciesMeta": { + "workerd": { + "optional": true + } + } + }, + "node_modules/@cloudflare/vitest-pool-workers/node_modules/wrangler": { + "version": "4.72.0", + "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-4.72.0.tgz", + "integrity": "sha512-bKkb8150JGzJZJWiNB2nu/33smVfawmfYiecA6rW4XH7xS23/jqMbgpdelM34W/7a1IhR66qeQGVqTRXROtAZg==", + "dev": true, + "license": "MIT OR Apache-2.0", + "dependencies": { + "@cloudflare/kv-asset-handler": "0.4.2", + "@cloudflare/unenv-preset": "2.15.0", + "blake3-wasm": "2.1.5", + "esbuild": "0.27.3", + "miniflare": "4.20260310.0", + "path-to-regexp": "6.3.0", + "unenv": "2.0.0-rc.24", + "workerd": "1.20260310.1" + }, + "bin": { + "wrangler": "bin/wrangler.js", + "wrangler2": "bin/wrangler.js" + }, + "engines": { + "node": ">=20.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + }, + "peerDependencies": { + "@cloudflare/workers-types": "^4.20260310.1" + }, + "peerDependenciesMeta": { + "@cloudflare/workers-types": { + "optional": true + } + } + }, + "node_modules/@cloudflare/workerd-darwin-64": { + "version": "1.20260310.1", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20260310.1.tgz", + "integrity": "sha512-hF2VpoWaMb1fiGCQJqCY6M8I+2QQqjkyY4LiDYdTL5D/w6C1l5v1zhc0/jrjdD1DXfpJtpcSMSmEPjHse4p9Ig==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" 
+ ], + "engines": { + "node": ">=16" + } + }, + "node_modules/@cloudflare/workerd-darwin-arm64": { + "version": "1.20260310.1", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20260310.1.tgz", + "integrity": "sha512-h/Vl3XrYYPI6yFDE27XO1QPq/1G1lKIM8tzZGIWYpntK3IN5XtH3Ee/sLaegpJ49aIJoqhF2mVAZ6Yw+Vk2gJw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/@cloudflare/workerd-linux-64": { + "version": "1.20260310.1", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20260310.1.tgz", + "integrity": "sha512-XzQ0GZ8G5P4d74bQYOIP2Su4CLdNPpYidrInaSOuSxMw+HamsHaFrjVsrV2mPy/yk2hi6SY2yMbgKFK9YjA7vw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/@cloudflare/workerd-linux-arm64": { + "version": "1.20260310.1", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20260310.1.tgz", + "integrity": "sha512-sxv4CxnN4ZR0uQGTFVGa0V4KTqwdej/czpIc5tYS86G8FQQoGIBiAIs2VvU7b8EROPcandxYHDBPTb+D9HIMPw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/@cloudflare/workerd-windows-64": { + "version": "1.20260310.1", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20260310.1.tgz", + "integrity": "sha512-+1ZTViWKJypLfgH/luAHCqkent0DEBjAjvO40iAhOMHRLYP/SPphLvr4Jpi6lb+sIocS8Q1QZL4uM5Etg1Wskg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": 
"https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@emnapi/runtime": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz", + "integrity": "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.3.tgz", + "integrity": "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.3.tgz", + "integrity": "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.3.tgz", + "integrity": "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + 
"version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.3.tgz", + "integrity": "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.3.tgz", + "integrity": "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.3.tgz", + "integrity": "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.3.tgz", + "integrity": "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.3.tgz", + "integrity": "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], 
+ "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.3.tgz", + "integrity": "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.3.tgz", + "integrity": "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.3.tgz", + "integrity": "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.3.tgz", + "integrity": "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.3.tgz", + "integrity": "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==", + "cpu": [ + "mips64el" + 
], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.3.tgz", + "integrity": "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.3.tgz", + "integrity": "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.3.tgz", + "integrity": "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.3.tgz", + "integrity": "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.3.tgz", + "integrity": 
"sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.3.tgz", + "integrity": "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.3.tgz", + "integrity": "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.3.tgz", + "integrity": "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.3.tgz", + "integrity": "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.3", + 
"resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.3.tgz", + "integrity": "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.3.tgz", + "integrity": "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.3.tgz", + "integrity": "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.3.tgz", + "integrity": "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@img/colour": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@img/colour/-/colour-1.1.0.tgz", + "integrity": "sha512-Td76q7j57o/tLVdgS746cYARfSyxk8iEfRxewL9h4OMzYhbW4TAcppl0mT4eyqXddh6L/jwoM75mo7ixa/pCeQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@img/sharp-darwin-arm64": { + "version": "0.34.5", + "resolved": 
"https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.5.tgz", + "integrity": "sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-arm64": "1.2.4" + } + }, + "node_modules/@img/sharp-darwin-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.5.tgz", + "integrity": "sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-x64": "1.2.4" + } + }, + "node_modules/@img/sharp-libvips-darwin-arm64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.4.tgz", + "integrity": "sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-darwin-x64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.4.tgz", + "integrity": "sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "LGPL-3.0-or-later", + 
"optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.4.tgz", + "integrity": "sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A==", + "cpu": [ + "arm" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.4.tgz", + "integrity": "sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw==", + "cpu": [ + "arm64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-ppc64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.2.4.tgz", + "integrity": "sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-riscv64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-riscv64/-/sharp-libvips-linux-riscv64-1.2.4.tgz", + "integrity": "sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA==", + "cpu": [ + "riscv64" + ], + "dev": 
true, + "libc": [ + "glibc" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-s390x": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.2.4.tgz", + "integrity": "sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ==", + "cpu": [ + "s390x" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-x64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.4.tgz", + "integrity": "sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-arm64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.4.tgz", + "integrity": "sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw==", + "cpu": [ + "arm64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-x64": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.4.tgz", + "integrity": 
"sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "LGPL-3.0-or-later", + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-linux-arm": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.5.tgz", + "integrity": "sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw==", + "cpu": [ + "arm" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-arm64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.5.tgz", + "integrity": "sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg==", + "cpu": [ + "arm64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm64": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-ppc64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-ppc64/-/sharp-linux-ppc64-0.34.5.tgz", + "integrity": "sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + 
"node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-ppc64": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-riscv64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-riscv64/-/sharp-linux-riscv64-0.34.5.tgz", + "integrity": "sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-riscv64": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-s390x": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.5.tgz", + "integrity": "sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg==", + "cpu": [ + "s390x" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-s390x": "1.2.4" + } + }, + "node_modules/@img/sharp-linux-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.5.tgz", + "integrity": "sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + 
}, + "optionalDependencies": { + "@img/sharp-libvips-linux-x64": "1.2.4" + } + }, + "node_modules/@img/sharp-linuxmusl-arm64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.5.tgz", + "integrity": "sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg==", + "cpu": [ + "arm64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-arm64": "1.2.4" + } + }, + "node_modules/@img/sharp-linuxmusl-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.5.tgz", + "integrity": "sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-x64": "1.2.4" + } + }, + "node_modules/@img/sharp-wasm32": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.34.5.tgz", + "integrity": "sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw==", + "cpu": [ + "wasm32" + ], + "dev": true, + "license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT", + "optional": true, + "dependencies": { + "@emnapi/runtime": "^1.7.0" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-arm64": { + "version": "0.34.5", + 
"resolved": "https://registry.npmjs.org/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.5.tgz", + "integrity": "sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-ia32": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.5.tgz", + "integrity": "sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-x64": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.5.tgz", + "integrity": "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0 AND LGPL-3.0-or-later", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": 
"https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@poppinss/colors": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@poppinss/colors/-/colors-4.1.6.tgz", + "integrity": "sha512-H9xkIdFswbS8n1d6vmRd8+c10t2Qe+rZITbbDHHkQixH5+2x1FDGmi/0K+WgWiqQFKPSlIYB7jlH6Kpfn6Fleg==", + "dev": true, + "license": "MIT", + "dependencies": { + "kleur": "^4.1.5" + } + }, + "node_modules/@poppinss/dumper": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/@poppinss/dumper/-/dumper-0.6.5.tgz", + "integrity": "sha512-NBdYIb90J7LfOI32dOewKI1r7wnkiH6m920puQ3qHUeZkxNkQiFnXVWoE6YtFSv6QOiPPf7ys6i+HWWecDz7sw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@poppinss/colors": "^4.1.5", + "@sindresorhus/is": "^7.0.2", + "supports-color": "^10.0.0" + } + }, + "node_modules/@poppinss/exception": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@poppinss/exception/-/exception-1.2.3.tgz", + "integrity": "sha512-dCED+QRChTVatE9ibtoaxc+WkdzOSjYTKi/+uacHWIsfodVfpsueo3+DKpgU5Px8qXjgmXkSvhXvSCz3fnP9lw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.3.tgz", + "integrity": 
"sha512-x35CNW/ANXG3hE/EZpRU8MXX1JDN86hBb2wMGAtltkz7pc6cxgjpy1OMMfDosOQ+2hWqIkag/fGok1Yady9nGw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.3.tgz", + "integrity": "sha512-xw3xtkDApIOGayehp2+Rz4zimfkaX65r4t47iy+ymQB2G4iJCBBfj0ogVg5jpvjpn8UWn/+q9tprxleYeNp3Hw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.3.tgz", + "integrity": "sha512-vo6Y5Qfpx7/5EaamIwi0WqW2+zfiusVihKatLvtN1VFVy3D13uERk/6gZLU1UiHRL6fDXqj/ELIeVRGnvcTE1g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.3.tgz", + "integrity": "sha512-D+0QGcZhBzTN82weOnsSlY7V7+RMmPuF1CkbxyMAGE8+ZHeUjyb76ZiWmBlCu//AQQONvxcqRbwZTajZKqjuOw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.3.tgz", + "integrity": "sha512-6HnvHCT7fDyj6R0Ph7A6x8dQS/S38MClRWeDLqc0MdfWkxjiu1HSDYrdPhqSILzjTIC/pnXbbJbo+ft+gy/9hQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.3.tgz", + 
"integrity": "sha512-KHLgC3WKlUYW3ShFKnnosZDOJ0xjg9zp7au3sIm2bs/tGBeC2ipmvRh/N7JKi0t9Ue20C0dpEshi8WUubg+cnA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.3.tgz", + "integrity": "sha512-DV6fJoxEYWJOvaZIsok7KrYl0tPvga5OZ2yvKHNNYyk/2roMLqQAbGhr78EQ5YhHpnhLKJD3S1WFusAkmUuV5g==", + "cpu": [ + "arm" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.3.tgz", + "integrity": "sha512-mQKoJAzvuOs6F+TZybQO4GOTSMUu7v0WdxEk24krQ/uUxXoPTtHjuaUuPmFhtBcM4K0ons8nrE3JyhTuCFtT/w==", + "cpu": [ + "arm" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.3.tgz", + "integrity": "sha512-Whjj2qoiJ6+OOJMGptTYazaJvjOJm+iKHpXQM1P3LzGjt7Ff++Tp7nH4N8J/BUA7R9IHfDyx4DJIflifwnbmIA==", + "cpu": [ + "arm64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.3.tgz", + "integrity": "sha512-4YTNHKqGng5+yiZt3mg77nmyuCfmNfX4fPmyUapBcIk+BdwSwmCWGXOUxhXbBEkFHtoN5boLj/5NON+u5QC9tg==", + "cpu": [ + "arm64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + 
"node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.3.tgz", + "integrity": "sha512-SU3kNlhkpI4UqlUc2VXPGK9o886ZsSeGfMAX2ba2b8DKmMXq4AL7KUrkSWVbb7koVqx41Yczx6dx5PNargIrEA==", + "cpu": [ + "loong64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.3.tgz", + "integrity": "sha512-6lDLl5h4TXpB1mTf2rQWnAk/LcXrx9vBfu/DT5TIPhvMhRWaZ5MxkIc8u4lJAmBo6klTe1ywXIUHFjylW505sg==", + "cpu": [ + "loong64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.3.tgz", + "integrity": "sha512-BMo8bOw8evlup/8G+cj5xWtPyp93xPdyoSN16Zy90Q2QZ0ZYRhCt6ZJSwbrRzG9HApFabjwj2p25TUPDWrhzqQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.3.tgz", + "integrity": "sha512-E0L8X1dZN1/Rph+5VPF6Xj2G7JJvMACVXtamTJIDrVI44Y3K+G8gQaMEAavbqCGTa16InptiVrX6eM6pmJ+7qA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.3.tgz", + "integrity": 
"sha512-oZJ/WHaVfHUiRAtmTAeo3DcevNsVvH8mbvodjZy7D5QKvCefO371SiKRpxoDcCxB3PTRTLayWBkvmDQKTcX/sw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.3.tgz", + "integrity": "sha512-Dhbyh7j9FybM3YaTgaHmVALwA8AkUwTPccyCQ79TG9AJUsMQqgN1DDEZNr4+QUfwiWvLDumW5vdwzoeUF+TNxQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.3.tgz", + "integrity": "sha512-cJd1X5XhHHlltkaypz1UcWLA8AcoIi1aWhsvaWDskD1oz2eKCypnqvTQ8ykMNI0RSmm7NkTdSqSSD7zM0xa6Ig==", + "cpu": [ + "s390x" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.3.tgz", + "integrity": "sha512-DAZDBHQfG2oQuhY7mc6I3/qB4LU2fQCjRvxbDwd/Jdvb9fypP4IJ4qmtu6lNjes6B531AI8cg1aKC2di97bUxA==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.3.tgz", + "integrity": "sha512-cRxsE8c13mZOh3vP+wLDxpQBRrOHDIGOWyDL93Sy0Ga8y515fBcC2pjUfFwUe5T7tqvTvWbCpg1URM/AXdWIXA==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + 
"node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.3.tgz", + "integrity": "sha512-QaWcIgRxqEdQdhJqW4DJctsH6HCmo5vHxY0krHSX4jMtOqfzC+dqDGuHM87bu4H8JBeibWx7jFz+h6/4C8wA5Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.3.tgz", + "integrity": "sha512-AaXwSvUi3QIPtroAUw1t5yHGIyqKEXwH54WUocFolZhpGDruJcs8c+xPNDRn4XiQsS7MEwnYsHW2l0MBLDMkWg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.3.tgz", + "integrity": "sha512-65LAKM/bAWDqKNEelHlcHvm2V+Vfb8C6INFxQXRHCvaVN1rJfwr4NvdP4FyzUaLqWfaCGaadf6UbTm8xJeYfEg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.3.tgz", + "integrity": "sha512-EEM2gyhBF5MFnI6vMKdX1LAosE627RGBzIoGMdLloPZkXrUN0Ckqgr2Qi8+J3zip/8NVVro3/FjB+tjhZUgUHA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.3.tgz", + "integrity": "sha512-E5Eb5H/DpxaoXH++Qkv28RcUJboMopmdDUALBczvHMf7hNIxaDZqwY5lK12UK1BHacSmvupoEWGu+n993Z0y1A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + 
"optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.3.tgz", + "integrity": "sha512-hPt/bgL5cE+Qp+/TPHBqptcAgPzgj46mPcg/16zNUmbQk0j+mOEQV/+Lqu8QRtDV3Ek95Q6FeFITpuhl6OTsAA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@sindresorhus/is": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-7.2.0.tgz", + "integrity": "sha512-P1Cz1dWaFfR4IR+U13mqqiGsLFf1KbayybWwdd2vfctdV6hDpUkgCY0nKOLLTMSoRd/jJNjtbqzf13K8DCCXQw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/@speed-highlight/core": { + "version": "1.2.15", + "resolved": "https://registry.npmjs.org/@speed-highlight/core/-/core-1.2.15.tgz", + "integrity": "sha512-BMq1K3DsElxDWawkX6eLg9+CKJrTVGCBAWVuHXVUV2u0s2711qiChLSId6ikYPfxhdYocLNt3wWwSvDiTvFabw==", + "dev": true, + "license": "CC0-1.0" + }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.9.tgz", + "integrity": 
"sha512-GhdPgy1el4/ImP05X05Uw4cw2/M93BCUmnEvWZNStlCzEKME4Fkk+YpoA5OiHNQmoS7Cafb8Xa3Pya8m1Qrzeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "25.6.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.6.2.tgz", + "integrity": "sha512-sokuT28dxf9JT5Kady1fsXOvI4HVpjZa95NKT5y9PNTIrs2AsobR4GFAA90ZG8M+nxVRLysCXsVj6eGC7Vbrlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.19.0" + } + }, + "node_modules/@vitest/expect": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", + "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.2.4.tgz", + "integrity": "sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "3.2.4", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.17" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", + "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^2.0.0" + }, + 
"funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-3.2.4.tgz", + "integrity": "sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "3.2.4", + "pathe": "^2.0.3", + "strip-literal": "^3.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.2.4.tgz", + "integrity": "sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "magic-string": "^0.30.17", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", + "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^4.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", + "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + 
"integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/blake3-wasm": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/blake3-wasm/-/blake3-wasm-2.1.5.tgz", + "integrity": "sha512-F1+K8EbfOZE49dtoPtmxUQrpXaBIl3ICvasLh+nJta0xkz+9kF/7uet9fLnwKqhDrmj6g+6K3Tw9yQPUg2ka5g==", + "dev": true, + "license": "MIT" + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/chai": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/check-error": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.3.tgz", + "integrity": "sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/cookie": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz", + "integrity": 
"sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/detect-libc": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", + "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/error-stack-parser-es": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/error-stack-parser-es/-/error-stack-parser-es-1.0.5.tgz", + "integrity": "sha512-5qucVt2XcuGMcEGgWI7i+yZpmpByQ8J1lHhcL7PwqCwu9FPP3VUXzT4ltHe5i2z9dePwEHcDVOAfSnHsOlCXRA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + 
"license": "MIT" + }, + "node_modules/esbuild": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.3.tgz", + "integrity": "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.3", + "@esbuild/android-arm": "0.27.3", + "@esbuild/android-arm64": "0.27.3", + "@esbuild/android-x64": "0.27.3", + "@esbuild/darwin-arm64": "0.27.3", + "@esbuild/darwin-x64": "0.27.3", + "@esbuild/freebsd-arm64": "0.27.3", + "@esbuild/freebsd-x64": "0.27.3", + "@esbuild/linux-arm": "0.27.3", + "@esbuild/linux-arm64": "0.27.3", + "@esbuild/linux-ia32": "0.27.3", + "@esbuild/linux-loong64": "0.27.3", + "@esbuild/linux-mips64el": "0.27.3", + "@esbuild/linux-ppc64": "0.27.3", + "@esbuild/linux-riscv64": "0.27.3", + "@esbuild/linux-s390x": "0.27.3", + "@esbuild/linux-x64": "0.27.3", + "@esbuild/netbsd-arm64": "0.27.3", + "@esbuild/netbsd-x64": "0.27.3", + "@esbuild/openbsd-arm64": "0.27.3", + "@esbuild/openbsd-x64": "0.27.3", + "@esbuild/openharmony-arm64": "0.27.3", + "@esbuild/sunos-x64": "0.27.3", + "@esbuild/win32-arm64": "0.27.3", + "@esbuild/win32-ia32": "0.27.3", + "@esbuild/win32-x64": "0.27.3" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + 
"license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/js-tokens": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", + "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/kleur": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", + "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/loupe": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": 
"sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/miniflare": { + "version": "4.20260310.0", + "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-4.20260310.0.tgz", + "integrity": "sha512-uC5vNPenFpDSj5aUU3wGSABG6UUqMr+Xs1m4AkCrTHo37F4Z6xcQw5BXqViTfPDVT/zcYH1UgTVoXhr1l6ZMXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "0.8.1", + "sharp": "^0.34.5", + "undici": "7.18.2", + "workerd": "1.20260310.1", + "ws": "8.18.0", + "youch": "4.1.0-beta.10" + }, + "bin": { + "miniflare": "bootstrap.js" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.12", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.12.tgz", + "integrity": "sha512-ZB9RH/39qpq5Vu6Y+NmUaFhQR6pp+M2Xt76XBnEwDaGcVAqhlvxrl3B2bKS5D3NH3QR76v3aSrKaF/Kiy7lEtQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/path-to-regexp": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz", + "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": 
"sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.14", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.14.tgz", + "integrity": "sha512-SoSL4+OSEtR99LHFZQiJLkT59C5B1amGO1NzTwj7TT1qCUgUO6hxOvzkOYxD+vMrXBM3XJIKzokoERdqQq/Zmg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/rollup": { + "version": "4.60.3", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.3.tgz", + "integrity": 
"sha512-pAQK9HalE84QSm4Po3EmWIZPd3FnjkShVkiMlz1iligWYkWQ7wHYd1PF/T7QZ5TVSD6uSTon5gBVMSM4JfBV+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.60.3", + "@rollup/rollup-android-arm64": "4.60.3", + "@rollup/rollup-darwin-arm64": "4.60.3", + "@rollup/rollup-darwin-x64": "4.60.3", + "@rollup/rollup-freebsd-arm64": "4.60.3", + "@rollup/rollup-freebsd-x64": "4.60.3", + "@rollup/rollup-linux-arm-gnueabihf": "4.60.3", + "@rollup/rollup-linux-arm-musleabihf": "4.60.3", + "@rollup/rollup-linux-arm64-gnu": "4.60.3", + "@rollup/rollup-linux-arm64-musl": "4.60.3", + "@rollup/rollup-linux-loong64-gnu": "4.60.3", + "@rollup/rollup-linux-loong64-musl": "4.60.3", + "@rollup/rollup-linux-ppc64-gnu": "4.60.3", + "@rollup/rollup-linux-ppc64-musl": "4.60.3", + "@rollup/rollup-linux-riscv64-gnu": "4.60.3", + "@rollup/rollup-linux-riscv64-musl": "4.60.3", + "@rollup/rollup-linux-s390x-gnu": "4.60.3", + "@rollup/rollup-linux-x64-gnu": "4.60.3", + "@rollup/rollup-linux-x64-musl": "4.60.3", + "@rollup/rollup-openbsd-x64": "4.60.3", + "@rollup/rollup-openharmony-arm64": "4.60.3", + "@rollup/rollup-win32-arm64-msvc": "4.60.3", + "@rollup/rollup-win32-ia32-msvc": "4.60.3", + "@rollup/rollup-win32-x64-gnu": "4.60.3", + "@rollup/rollup-win32-x64-msvc": "4.60.3", + "fsevents": "~2.3.2" + } + }, + "node_modules/rollup/node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.8.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.8.0.tgz", + "integrity": 
"sha512-AcM7dV/5ul4EekoQ29Agm5vri8JNqRyj39o0qpX6vDF2GZrtutZl5RwgD1XnZjiTAfncsJhMI48QQH3sN87YNA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/sharp": { + "version": "0.34.5", + "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.34.5.tgz", + "integrity": "sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==", + "dev": true, + "hasInstallScript": true, + "license": "Apache-2.0", + "dependencies": { + "@img/colour": "^1.0.0", + "detect-libc": "^2.1.2", + "semver": "^7.7.3" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-darwin-arm64": "0.34.5", + "@img/sharp-darwin-x64": "0.34.5", + "@img/sharp-libvips-darwin-arm64": "1.2.4", + "@img/sharp-libvips-darwin-x64": "1.2.4", + "@img/sharp-libvips-linux-arm": "1.2.4", + "@img/sharp-libvips-linux-arm64": "1.2.4", + "@img/sharp-libvips-linux-ppc64": "1.2.4", + "@img/sharp-libvips-linux-riscv64": "1.2.4", + "@img/sharp-libvips-linux-s390x": "1.2.4", + "@img/sharp-libvips-linux-x64": "1.2.4", + "@img/sharp-libvips-linuxmusl-arm64": "1.2.4", + "@img/sharp-libvips-linuxmusl-x64": "1.2.4", + "@img/sharp-linux-arm": "0.34.5", + "@img/sharp-linux-arm64": "0.34.5", + "@img/sharp-linux-ppc64": "0.34.5", + "@img/sharp-linux-riscv64": "0.34.5", + "@img/sharp-linux-s390x": "0.34.5", + "@img/sharp-linux-x64": "0.34.5", + "@img/sharp-linuxmusl-arm64": "0.34.5", + "@img/sharp-linuxmusl-x64": "0.34.5", + "@img/sharp-wasm32": "0.34.5", + "@img/sharp-win32-arm64": "0.34.5", + "@img/sharp-win32-ia32": "0.34.5", + "@img/sharp-win32-x64": "0.34.5" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": 
"sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "dev": true, + "license": "MIT" + }, + "node_modules/strip-literal": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.1.0.tgz", + "integrity": "sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "js-tokens": "^9.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/supports-color": { + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-10.2.2.tgz", + "integrity": "sha512-SS+jx45GF1QjgEXQx4NJZV9ImqmO2NPz5FNsIHrsDjh2YsHnawpan7SNQ1o8NuhrbHZy9AZhIoCUiCeaW/C80g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + 
"integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.16", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.16.tgz", + "integrity": "sha512-pn99VhoACYR8nFHhxqix+uvsbXineAasWm5ojXoN8xEwK5Kd3/TrhNn1wByuD52UxWRLy8pu+kRMniEi6Eq9Zg==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.4" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/tinyrainbow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", + "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.4.tgz", + "integrity": "sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + 
"integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true, + "license": "0BSD", + "optional": true + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici": { + "version": "7.18.2", + "resolved": "https://registry.npmjs.org/undici/-/undici-7.18.2.tgz", + "integrity": "sha512-y+8YjDFzWdQlSE9N5nzKMT3g4a5UBX1HKowfdXh0uvAnTaqqwqB92Jt4UXBAeKekDs5IaDKyJFR4X1gYVCgXcw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20.18.1" + } + }, + "node_modules/undici-types": { + "version": "7.19.2", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.19.2.tgz", + "integrity": "sha512-qYVnV5OEm2AW8cJMCpdV20CDyaN3g0AjDlOGf1OW4iaDEx8MwdtChUp4zu4H0VP3nDRF/8RKWH+IPp9uW0YGZg==", + "dev": true, + "license": "MIT" + }, + "node_modules/unenv": { + "version": "2.0.0-rc.24", + "resolved": "https://registry.npmjs.org/unenv/-/unenv-2.0.0-rc.24.tgz", + "integrity": "sha512-i7qRCmY42zmCwnYlh9H2SvLEypEFGye5iRmEMKjcGi7zk9UquigRjFtTLz0TYqr0ZGLZhaMHl/foy1bZR+Cwlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "pathe": "^2.0.3" + } + }, + "node_modules/vite": { + "version": "7.3.3", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.3.tgz", + "integrity": "sha512-/4XH147Ui7OGTjg3HbdWe5arnZQSbfuRzdr9Ec7TQi5I7R+ir0Rlc9GIvD4v0XZurELqA035KVXJXpR61xhiTA==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.27.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": 
"^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vite-node": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-3.2.4.tgz", + "integrity": "sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.4.1", + "es-module-lexer": "^1.7.0", + "pathe": "^2.0.3", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vitest": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.2.4.tgz", + "integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/expect": "3.2.4", + "@vitest/mocker": "3.2.4", + "@vitest/pretty-format": "^3.2.4", + "@vitest/runner": "3.2.4", + "@vitest/snapshot": 
"3.2.4", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "debug": "^4.4.1", + "expect-type": "^1.2.1", + "magic-string": "^0.30.17", + "pathe": "^2.0.3", + "picomatch": "^4.0.2", + "std-env": "^3.9.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.14", + "tinypool": "^1.1.1", + "tinyrainbow": "^2.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", + "vite-node": "3.2.4", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/debug": "^4.1.12", + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "@vitest/browser": "3.2.4", + "@vitest/ui": "3.2.4", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/debug": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/workerd": { + "version": "1.20260310.1", + "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20260310.1.tgz", + "integrity": "sha512-yawXhypXXHtArikJj15HOMknNGikpBbSg2ZDe6lddUbqZnJXuCVSkgc/0ArUeVMG1jbbGvpst+REFtKwILvRTQ==", + "dev": true, + "hasInstallScript": true, + "license": "Apache-2.0", + "bin": { + 
"workerd": "bin/workerd" + }, + "engines": { + "node": ">=16" + }, + "optionalDependencies": { + "@cloudflare/workerd-darwin-64": "1.20260310.1", + "@cloudflare/workerd-darwin-arm64": "1.20260310.1", + "@cloudflare/workerd-linux-64": "1.20260310.1", + "@cloudflare/workerd-linux-arm64": "1.20260310.1", + "@cloudflare/workerd-windows-64": "1.20260310.1" + } + }, + "node_modules/wrangler": { + "version": "4.90.0", + "resolved": "https://registry.npmjs.org/wrangler/-/wrangler-4.90.0.tgz", + "integrity": "sha512-bmNIykl59TfCUn5xQgU7IWylSsPx3LQaPLMSAq2VQHt89CBrcj9qXQ0eYfjBCWA5XTBVgten391evt7xxtXwcA==", + "dev": true, + "license": "MIT OR Apache-2.0", + "dependencies": { + "@cloudflare/kv-asset-handler": "0.5.0", + "@cloudflare/unenv-preset": "2.16.1", + "blake3-wasm": "2.1.5", + "esbuild": "0.27.3", + "miniflare": "4.20260507.1", + "path-to-regexp": "6.3.0", + "unenv": "2.0.0-rc.24", + "workerd": "1.20260507.1" + }, + "bin": { + "wrangler": "bin/wrangler.js", + "wrangler2": "bin/wrangler.js" + }, + "engines": { + "node": ">=22.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + }, + "peerDependencies": { + "@cloudflare/workers-types": "^4.20260507.1" + }, + "peerDependenciesMeta": { + "@cloudflare/workers-types": { + "optional": true + } + } + }, + "node_modules/wrangler/node_modules/@cloudflare/workerd-darwin-64": { + "version": "1.20260507.1", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20260507.1.tgz", + "integrity": "sha512-S85aMwcaPJUjKWDiG6iMMnioKWtPLACa6m0j/EhHR1GYfVpnxb974cBc6d25L+sf7jHWHJI2u5hGp0UTJ7MtXQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/wrangler/node_modules/@cloudflare/workerd-darwin-arm64": { + "version": "1.20260507.1", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20260507.1.tgz", + "integrity": 
"sha512-GMEBu8Zp9Q97HLnf7bWJN4KjWpN5MxpeqdvHjBGWNl8UYprJI0k+Jkp89+Wh5S8vIon+HoVbDfOzPa7VwgL6Eg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/wrangler/node_modules/@cloudflare/workerd-linux-64": { + "version": "1.20260507.1", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20260507.1.tgz", + "integrity": "sha512-QlrKEBdgA3uVc0Ok0Q3+0/CW0CTjgj5ySir1i1YY5FXVv0X6GpwtnB5umjunjF2MFprss+L+iFGZzxcSvMC1nA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/wrangler/node_modules/@cloudflare/workerd-linux-arm64": { + "version": "1.20260507.1", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20260507.1.tgz", + "integrity": "sha512-eGbbupEtK2nh9V9Dhcx3vv3GTKeXqSVNgAEYVCCN0NGS9tl9HbMoHRX/4JL181FKXROMigWBCQVL//qPhsAzBQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/wrangler/node_modules/@cloudflare/workerd-windows-64": { + "version": "1.20260507.1", + "resolved": "https://registry.npmjs.org/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20260507.1.tgz", + "integrity": "sha512-dmClJ/E0BAcuDetQIZFqbeAXejWrG5pysGRMQ6T83Y0IW/7IAamY2zFEkAJ10I5xwZsdHuYsZtzlOxpEXpJs7A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=16" + } + }, + "node_modules/wrangler/node_modules/miniflare": { + "version": "4.20260507.1", + "resolved": "https://registry.npmjs.org/miniflare/-/miniflare-4.20260507.1.tgz", + "integrity": "sha512-PSXBiLExTdZ4UGO/raKCHQauUpYL7F880ZRB7j0+78Rv8h7TsdN2E/iEDK9sK2Y+SPQ5wJSeAa+rDeVKoZZoEw==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "0.8.1", + "sharp": "^0.34.5", + "undici": "7.24.8", + "workerd": "1.20260507.1", + "ws": "8.18.0", + "youch": "4.1.0-beta.10" + }, + "bin": { + "miniflare": "bootstrap.js" + }, + "engines": { + "node": ">=22.0.0" + } + }, + "node_modules/wrangler/node_modules/undici": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/undici/-/undici-7.24.8.tgz", + "integrity": "sha512-6KQ/+QxK49Z/p3HO6E5ZCZWNnCasyZLa5ExaVYyvPxUwKtbCPMKELJOqh7EqOle0t9cH/7d2TaaTRRa6Nhs4YQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20.18.1" + } + }, + "node_modules/wrangler/node_modules/workerd": { + "version": "1.20260507.1", + "resolved": "https://registry.npmjs.org/workerd/-/workerd-1.20260507.1.tgz", + "integrity": "sha512-z7JhsFSe6+X1b5fUHaVpo15VM1IRMJiLofEkq8iKdCo+Veqc+FUg5lIsuz8NwePxuSKrXtO4ZQpGkQLbPVXFhg==", + "dev": true, + "hasInstallScript": true, + "license": "Apache-2.0", + "bin": { + "workerd": "bin/workerd" + }, + "engines": { + "node": ">=16" + }, + "optionalDependencies": { + "@cloudflare/workerd-darwin-64": "1.20260507.1", + "@cloudflare/workerd-darwin-arm64": "1.20260507.1", + "@cloudflare/workerd-linux-64": "1.20260507.1", + "@cloudflare/workerd-linux-arm64": "1.20260507.1", + "@cloudflare/workerd-windows-64": "1.20260507.1" + } + }, + "node_modules/ws": { + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", + "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/youch": { + "version": "4.1.0-beta.10", + "resolved": "https://registry.npmjs.org/youch/-/youch-4.1.0-beta.10.tgz", + 
"integrity": "sha512-rLfVLB4FgQneDr0dv1oddCVZmKjcJ6yX6mS4pU82Mq/Dt9a3cLZQ62pDBL4AUO+uVrCvtWz3ZFUL2HFAFJ/BXQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@poppinss/colors": "^4.1.5", + "@poppinss/dumper": "^0.6.4", + "@speed-highlight/core": "^1.2.7", + "cookie": "^1.0.2", + "youch-core": "^0.3.3" + } + }, + "node_modules/youch-core": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/youch-core/-/youch-core-0.3.3.tgz", + "integrity": "sha512-ho7XuGjLaJ2hWHoK8yFnsUGy2Y5uDpqSTq1FkHLK4/oqKtyUU1AFbOOxY4IpC9f0fTLjwYbslUz0Po5BpD1wrA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@poppinss/exception": "^1.2.2", + "error-stack-parser-es": "^1.0.5" + } + } + } +} diff --git a/edge-api/package.json b/edge-api/package.json new file mode 100644 index 0000000000..4452b7a7f3 --- /dev/null +++ b/edge-api/package.json @@ -0,0 +1,19 @@ +{ + "name": "edge-api", + "version": "0.0.0", + "private": true, + "scripts": { + "deploy": "wrangler deploy", + "dev": "wrangler dev", + "start": "wrangler dev", + "test": "vitest", + "cf-typegen": "wrangler types" + }, + "devDependencies": { + "@cloudflare/vitest-pool-workers": "^0.12.4", + "@types/node": "^25.6.2", + "typescript": "^5.5.2", + "vitest": "~3.2.0", + "wrangler": "^4.90.0" + } +} \ No newline at end of file diff --git a/edge-api/src/index.ts b/edge-api/src/index.ts new file mode 100644 index 0000000000..0ba8ff17f4 --- /dev/null +++ b/edge-api/src/index.ts @@ -0,0 +1,163 @@ +/** + * edge-api — Lab 17 Cloudflare Worker. 
+ * + * Routes: + * GET / — service info (vars + uptime) + * GET /health — liveness + * GET /edge — request.cf metadata (Task 3) + * GET /counter — KV-backed visit counter (Task 4 persistence) + * GET /whoami — secrets-backed admin info, redacted (Task 4 secrets) + * * — 404 JSON + * + * Bindings (see wrangler.jsonc): + * APP_NAME — plaintext var + * COURSE_NAME — plaintext var + * API_TOKEN — secret (set via `wrangler secret put`) + * ADMIN_EMAIL — secret (set via `wrangler secret put`) + * SETTINGS — KV namespace + */ + +export interface Env { + APP_NAME: string; + COURSE_NAME: string; + API_TOKEN: string; + ADMIN_EMAIL: string; + SETTINGS: KVNamespace; +} + +// Workers V8 isolates do not return wall time at module init (timing-attack +// mitigation), so we lazily seed the start timestamp on the first fetch. +let START = 0; +const VERSION = "1.0.1"; + +function maskTail(value: string | undefined, keep = 4): string { + if (!value) return ""; + if (value.length <= keep) return "*".repeat(value.length); + return "*".repeat(value.length - keep) + value.slice(-keep); +} + +function maskEmail(email: string | undefined): string { + if (!email) return ""; + const [user, domain] = email.split("@", 2); + if (!domain) return maskTail(email); + const head = user.length <= 2 ? user[0] ?? "*" : user.slice(0, 2); + return `${head}***@${domain}`; +} + +export default { + async fetch(request: Request, env: Env, _ctx: ExecutionContext): Promise { + const url = new URL(request.url); + const t0 = Date.now(); + if (START === 0) START = t0; + + // Structured edge log — visible via `wrangler tail` and Workers Logs. + console.log( + JSON.stringify({ + level: "info", + event: "request_start", + method: request.method, + path: url.pathname, + colo: request.cf?.colo ?? null, + country: request.cf?.country ?? 
null, + ts: new Date().toISOString(), + }), + ); + + const response = await route(request, env, url); + + console.log( + JSON.stringify({ + level: "info", + event: "request_end", + path: url.pathname, + status: response.status, + duration_ms: Date.now() - t0, + }), + ); + + return response; + }, +} satisfies ExportedHandler; + +async function route(request: Request, env: Env, url: URL): Promise { + switch (url.pathname) { + case "/": + return Response.json({ + service: env.APP_NAME ?? "edge-api", + course: env.COURSE_NAME ?? "devops-core", + version: VERSION, + message: "Hello from Cloudflare Workers — v2 (deployment-history demo)", + isolate_uptime_ms: START === 0 ? 0 : Date.now() - START, + timestamp: new Date().toISOString(), + routes: ["/", "/health", "/edge", "/counter", "/whoami"], + }); + + case "/health": + return Response.json({ + status: "ok", + timestamp: new Date().toISOString(), + }); + + case "/edge": + // Cloudflare populates request.cf with edge-side metadata. + // In `wrangler dev` (local mode) most fields are null — that's expected; + // they appear once the Worker runs on the real edge. + return Response.json({ + colo: request.cf?.colo ?? null, + country: request.cf?.country ?? null, + city: request.cf?.city ?? null, + region: request.cf?.region ?? null, + asn: request.cf?.asn ?? null, + asOrganization: request.cf?.asOrganization ?? null, + httpProtocol: request.cf?.httpProtocol ?? null, + tlsVersion: request.cf?.tlsVersion ?? null, + timezone: request.cf?.timezone ?? null, + clientTcpRtt: request.cf?.clientTcpRtt ?? null, + note: + "Fields are populated by Cloudflare's edge runtime. Local `wrangler dev` returns nulls — deploy and re-test against the workers.dev URL.", + }); + + case "/counter": { + // KV-backed visits counter. Read → increment → write. + // KV is eventually consistent — fine for a demo counter. + const raw = await env.SETTINGS.get("visits"); + const visits = Number(raw ?? 
"0") + 1; + await env.SETTINGS.put("visits", String(visits)); + + console.log( + JSON.stringify({ + level: "info", + event: "counter_inc", + previous: Number(raw ?? "0"), + next: visits, + }), + ); + + return Response.json({ + visits, + key: "visits", + note: "Persisted in Workers KV (binding SETTINGS); survives redeploys.", + }); + } + + case "/whoami": + // Secrets are read from env but never returned in plaintext. + return Response.json({ + app: env.APP_NAME ?? "edge-api", + admin_email: maskEmail(env.ADMIN_EMAIL), + api_token: maskTail(env.API_TOKEN), + note: + "Both ADMIN_EMAIL and API_TOKEN are Wrangler secrets (`wrangler secret put`). Values shown here are redacted; only the last 4 characters of API_TOKEN and the domain of ADMIN_EMAIL are visible.", + }); + + default: + return Response.json( + { + error: "not_found", + path: url.pathname, + known: ["/", "/health", "/edge", "/counter", "/whoami"], + }, + { status: 404 }, + ); + } +} diff --git a/edge-api/tsconfig.json b/edge-api/tsconfig.json new file mode 100644 index 0000000000..8c98cdbece --- /dev/null +++ b/edge-api/tsconfig.json @@ -0,0 +1,46 @@ +{ + "compilerOptions": { + /* Visit https://aka.ms/tsconfig.json to read more about this file */ + + /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */ + "target": "es2024", + /* Specify a set of bundled library declaration files that describe the target runtime environment. */ + "lib": ["es2024"], + /* Specify what JSX code is generated. */ + "jsx": "react-jsx", + + /* Specify what module code is generated. */ + "module": "es2022", + /* Specify how TypeScript looks up a file from a given module specifier. */ + "moduleResolution": "Bundler", + /* Enable importing .json files */ + "resolveJsonModule": true, + + /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */ + "allowJs": true, + /* Enable error reporting in type-checked JavaScript files. 
*/ + "checkJs": false, + + /* Disable emitting files from a compilation. */ + "noEmit": true, + + /* Ensure that each file can be safely transpiled without relying on other imports. */ + "isolatedModules": true, + /* Allow 'import x from y' when a module doesn't have a default export. */ + "allowSyntheticDefaultImports": true, + /* Ensure that casing is correct in imports. */ + "forceConsistentCasingInFileNames": true, + + /* Enable all strict type-checking options. */ + "strict": true, + + /* Skip type checking all .d.ts files. */ + "skipLibCheck": true, + "types": [ + "./worker-configuration.d.ts", + "node" + ] + }, + "exclude": ["test"], + "include": ["worker-configuration.d.ts", "src/**/*.ts"] +} diff --git a/edge-api/vitest.config.mts b/edge-api/vitest.config.mts new file mode 100644 index 0000000000..7ccad75efa --- /dev/null +++ b/edge-api/vitest.config.mts @@ -0,0 +1,11 @@ +import { defineWorkersConfig } from "@cloudflare/vitest-pool-workers/config"; + +export default defineWorkersConfig({ + test: { + poolOptions: { + workers: { + wrangler: { configPath: "./wrangler.jsonc" }, + }, + }, + }, +}); diff --git a/edge-api/worker-configuration.d.ts b/edge-api/worker-configuration.d.ts new file mode 100644 index 0000000000..10e0f2adf7 --- /dev/null +++ b/edge-api/worker-configuration.d.ts @@ -0,0 +1,13559 @@ +/* eslint-disable */ +// Generated by Wrangler by running `wrangler types` (hash: e3ee7b713dafee05ac478d9226e14845) +// Runtime types generated with workerd@1.20260507.1 2026-05-10 nodejs_compat +declare namespace Cloudflare { + interface GlobalProps { + mainModule: typeof import("./src/index"); + } + interface Env { + SETTINGS: KVNamespace; + APP_NAME: "edge-api"; + COURSE_NAME: "devops-core"; + } +} +interface Env extends Cloudflare.Env {} +type StringifyValues> = { + [Binding in keyof EnvType]: EnvType[Binding] extends string ? 
EnvType[Binding] : string; +}; +declare namespace NodeJS { + interface ProcessEnv extends StringifyValues> {} +} + +// Begin runtime types +/*! ***************************************************************************** +Copyright (c) Cloudflare. All rights reserved. +Copyright (c) Microsoft Corporation. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 +THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. +***************************************************************************** */ +/* eslint-disable */ +// noinspection JSUnusedGlobalSymbols +declare var onmessage: never; +/** + * The **`DOMException`** interface represents an abnormal event (called an **exception**) that occurs as a result of calling a method or accessing a property of a web API. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException) + */ +declare class DOMException extends Error { + constructor(message?: string, name?: string); + /** + * The **`message`** read-only property of the a message or description associated with the given error name. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/message) + */ + readonly message: string; + /** + * The **`name`** read-only property of the one of the strings associated with an error name. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/name) + */ + readonly name: string; + /** + * The **`code`** read-only property of the DOMException interface returns one of the legacy error code constants, or `0` if none match. + * @deprecated + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/code) + */ + readonly code: number; + static readonly INDEX_SIZE_ERR: number; + static readonly DOMSTRING_SIZE_ERR: number; + static readonly HIERARCHY_REQUEST_ERR: number; + static readonly WRONG_DOCUMENT_ERR: number; + static readonly INVALID_CHARACTER_ERR: number; + static readonly NO_DATA_ALLOWED_ERR: number; + static readonly NO_MODIFICATION_ALLOWED_ERR: number; + static readonly NOT_FOUND_ERR: number; + static readonly NOT_SUPPORTED_ERR: number; + static readonly INUSE_ATTRIBUTE_ERR: number; + static readonly INVALID_STATE_ERR: number; + static readonly SYNTAX_ERR: number; + static readonly INVALID_MODIFICATION_ERR: number; + static readonly NAMESPACE_ERR: number; + static readonly INVALID_ACCESS_ERR: number; + static readonly VALIDATION_ERR: number; + static readonly TYPE_MISMATCH_ERR: number; + static readonly SECURITY_ERR: number; + static readonly NETWORK_ERR: number; + static readonly ABORT_ERR: number; + static readonly URL_MISMATCH_ERR: number; + static readonly QUOTA_EXCEEDED_ERR: number; + static readonly TIMEOUT_ERR: number; + static readonly INVALID_NODE_TYPE_ERR: number; + static readonly DATA_CLONE_ERR: number; + get stack(): any; + set stack(value: any); +} +type WorkerGlobalScopeEventMap = { + fetch: FetchEvent; + scheduled: ScheduledEvent; + queue: QueueEvent; + unhandledrejection: PromiseRejectionEvent; + rejectionhandled: PromiseRejectionEvent; +}; +declare abstract class WorkerGlobalScope extends EventTarget { + EventTarget: typeof EventTarget; +} +/* The **`console`** object provides access to the debugging console (e.g., the Web console in Firefox). 
* + * The **`console`** object provides access to the debugging console (e.g., the Web console in Firefox). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console) + */ +interface Console { + "assert"(condition?: boolean, ...data: any[]): void; + /** + * The **`console.clear()`** static method clears the console if possible. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/clear_static) + */ + clear(): void; + /** + * The **`console.count()`** static method logs the number of times that this particular call to `count()` has been called. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/count_static) + */ + count(label?: string): void; + /** + * The **`console.countReset()`** static method resets counter used with console/count_static. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/countReset_static) + */ + countReset(label?: string): void; + /** + * The **`console.debug()`** static method outputs a message to the console at the 'debug' log level. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/debug_static) + */ + debug(...data: any[]): void; + /** + * The **`console.dir()`** static method displays a list of the properties of the specified JavaScript object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/dir_static) + */ + dir(item?: any, options?: any): void; + /** + * The **`console.dirxml()`** static method displays an interactive tree of the descendant elements of the specified XML/HTML element. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/dirxml_static) + */ + dirxml(...data: any[]): void; + /** + * The **`console.error()`** static method outputs a message to the console at the 'error' log level. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/error_static) + */ + error(...data: any[]): void; + /** + * The **`console.group()`** static method creates a new inline group in the Web console log, causing any subsequent console messages to be indented by an additional level, until console/groupEnd_static is called. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/group_static) + */ + group(...data: any[]): void; + /** + * The **`console.groupCollapsed()`** static method creates a new inline group in the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupCollapsed_static) + */ + groupCollapsed(...data: any[]): void; + /** + * The **`console.groupEnd()`** static method exits the current inline group in the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupEnd_static) + */ + groupEnd(): void; + /** + * The **`console.info()`** static method outputs a message to the console at the 'info' log level. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/info_static) + */ + info(...data: any[]): void; + /** + * The **`console.log()`** static method outputs a message to the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/log_static) + */ + log(...data: any[]): void; + /** + * The **`console.table()`** static method displays tabular data as a table. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/table_static) + */ + table(tabularData?: any, properties?: string[]): void; + /** + * The **`console.time()`** static method starts a timer you can use to track how long an operation takes. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/time_static) + */ + time(label?: string): void; + /** + * The **`console.timeEnd()`** static method stops a timer that was previously started by calling console/time_static. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeEnd_static) + */ + timeEnd(label?: string): void; + /** + * The **`console.timeLog()`** static method logs the current value of a timer that was previously started by calling console/time_static. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeLog_static) + */ + timeLog(label?: string, ...data: any[]): void; + timeStamp(label?: string): void; + /** + * The **`console.trace()`** static method outputs a stack trace to the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/trace_static) + */ + trace(...data: any[]): void; + /** + * The **`console.warn()`** static method outputs a warning message to the console at the 'warning' log level. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/warn_static) + */ + warn(...data: any[]): void; +} +declare const console: Console; +type BufferSource = ArrayBufferView | ArrayBuffer; +type TypedArray = Int8Array | Uint8Array | Uint8ClampedArray | Int16Array | Uint16Array | Int32Array | Uint32Array | Float32Array | Float64Array | BigInt64Array | BigUint64Array; +declare namespace WebAssembly { + class CompileError extends Error { + constructor(message?: string); + } + class RuntimeError extends Error { + constructor(message?: string); + } + type ValueType = "anyfunc" | "externref" | "f32" | "f64" | "i32" | "i64" | "v128"; + interface GlobalDescriptor { + value: ValueType; + mutable?: boolean; + } + class Global { + constructor(descriptor: GlobalDescriptor, value?: any); + value: any; + valueOf(): any; + } + type ImportValue = ExportValue | number; + type ModuleImports = Record; + type Imports = Record; + type ExportValue = Function | Global | Memory | Table; + type Exports = Record; + class Instance { + constructor(module: Module, imports?: Imports); + readonly exports: Exports; + } + interface MemoryDescriptor { + initial: number; + maximum?: number; + shared?: 
boolean; + } + class Memory { + constructor(descriptor: MemoryDescriptor); + readonly buffer: ArrayBuffer; + grow(delta: number): number; + } + type ImportExportKind = "function" | "global" | "memory" | "table"; + interface ModuleExportDescriptor { + kind: ImportExportKind; + name: string; + } + interface ModuleImportDescriptor { + kind: ImportExportKind; + module: string; + name: string; + } + abstract class Module { + static customSections(module: Module, sectionName: string): ArrayBuffer[]; + static exports(module: Module): ModuleExportDescriptor[]; + static imports(module: Module): ModuleImportDescriptor[]; + } + type TableKind = "anyfunc" | "externref"; + interface TableDescriptor { + element: TableKind; + initial: number; + maximum?: number; + } + class Table { + constructor(descriptor: TableDescriptor, value?: any); + readonly length: number; + get(index: number): any; + grow(delta: number, value?: any): number; + set(index: number, value?: any): void; + } + function instantiate(module: Module, imports?: Imports): Promise; + function validate(bytes: BufferSource): boolean; +} +/** + * The **`ServiceWorkerGlobalScope`** interface of the Service Worker API represents the global execution context of a service worker. + * Available only in secure contexts. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ServiceWorkerGlobalScope) + */ +interface ServiceWorkerGlobalScope extends WorkerGlobalScope { + DOMException: typeof DOMException; + WorkerGlobalScope: typeof WorkerGlobalScope; + btoa(data: string): string; + atob(data: string): string; + setTimeout(callback: (...args: any[]) => void, msDelay?: number): number; + setTimeout(callback: (...args: Args) => void, msDelay?: number, ...args: Args): number; + clearTimeout(timeoutId: number | null): void; + setInterval(callback: (...args: any[]) => void, msDelay?: number): number; + setInterval(callback: (...args: Args) => void, msDelay?: number, ...args: Args): number; + clearInterval(timeoutId: number | null): void; + queueMicrotask(task: Function): void; + structuredClone(value: T, options?: StructuredSerializeOptions): T; + reportError(error: any): void; + fetch(input: RequestInfo | URL, init?: RequestInit): Promise; + self: ServiceWorkerGlobalScope; + crypto: Crypto; + caches: CacheStorage; + scheduler: Scheduler; + performance: Performance; + Cloudflare: Cloudflare; + readonly origin: string; + Event: typeof Event; + ExtendableEvent: typeof ExtendableEvent; + CustomEvent: typeof CustomEvent; + PromiseRejectionEvent: typeof PromiseRejectionEvent; + FetchEvent: typeof FetchEvent; + TailEvent: typeof TailEvent; + TraceEvent: typeof TailEvent; + ScheduledEvent: typeof ScheduledEvent; + MessageEvent: typeof MessageEvent; + CloseEvent: typeof CloseEvent; + ReadableStreamDefaultReader: typeof ReadableStreamDefaultReader; + ReadableStreamBYOBReader: typeof ReadableStreamBYOBReader; + ReadableStream: typeof ReadableStream; + WritableStream: typeof WritableStream; + WritableStreamDefaultWriter: typeof WritableStreamDefaultWriter; + TransformStream: typeof TransformStream; + ByteLengthQueuingStrategy: typeof ByteLengthQueuingStrategy; + CountQueuingStrategy: typeof CountQueuingStrategy; + ErrorEvent: typeof ErrorEvent; + MessageChannel: typeof MessageChannel; + 
MessagePort: typeof MessagePort; + EventSource: typeof EventSource; + ReadableStreamBYOBRequest: typeof ReadableStreamBYOBRequest; + ReadableStreamDefaultController: typeof ReadableStreamDefaultController; + ReadableByteStreamController: typeof ReadableByteStreamController; + WritableStreamDefaultController: typeof WritableStreamDefaultController; + TransformStreamDefaultController: typeof TransformStreamDefaultController; + CompressionStream: typeof CompressionStream; + DecompressionStream: typeof DecompressionStream; + TextEncoderStream: typeof TextEncoderStream; + TextDecoderStream: typeof TextDecoderStream; + Headers: typeof Headers; + Body: typeof Body; + Request: typeof Request; + Response: typeof Response; + WebSocket: typeof WebSocket; + WebSocketPair: typeof WebSocketPair; + WebSocketRequestResponsePair: typeof WebSocketRequestResponsePair; + AbortController: typeof AbortController; + AbortSignal: typeof AbortSignal; + TextDecoder: typeof TextDecoder; + TextEncoder: typeof TextEncoder; + navigator: Navigator; + Navigator: typeof Navigator; + URL: typeof URL; + URLSearchParams: typeof URLSearchParams; + URLPattern: typeof URLPattern; + Blob: typeof Blob; + File: typeof File; + FormData: typeof FormData; + Crypto: typeof Crypto; + SubtleCrypto: typeof SubtleCrypto; + CryptoKey: typeof CryptoKey; + CacheStorage: typeof CacheStorage; + Cache: typeof Cache; + FixedLengthStream: typeof FixedLengthStream; + IdentityTransformStream: typeof IdentityTransformStream; + HTMLRewriter: typeof HTMLRewriter; +} +declare function addEventListener(type: Type, handler: EventListenerOrEventListenerObject, options?: EventTargetAddEventListenerOptions | boolean): void; +declare function removeEventListener(type: Type, handler: EventListenerOrEventListenerObject, options?: EventTargetEventListenerOptions | boolean): void; +/** + * The **`dispatchEvent()`** method of the EventTarget sends an Event to the object, (synchronously) invoking the affected event listeners in the 
appropriate order. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/dispatchEvent) + */ +declare function dispatchEvent(event: WorkerGlobalScopeEventMap[keyof WorkerGlobalScopeEventMap]): boolean; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/btoa) */ +declare function btoa(data: string): string; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/atob) */ +declare function atob(data: string): string; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setTimeout) */ +declare function setTimeout(callback: (...args: any[]) => void, msDelay?: number): number; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setTimeout) */ +declare function setTimeout(callback: (...args: Args) => void, msDelay?: number, ...args: Args): number; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/clearTimeout) */ +declare function clearTimeout(timeoutId: number | null): void; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setInterval) */ +declare function setInterval(callback: (...args: any[]) => void, msDelay?: number): number; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setInterval) */ +declare function setInterval(callback: (...args: Args) => void, msDelay?: number, ...args: Args): number; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/clearInterval) */ +declare function clearInterval(timeoutId: number | null): void; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/queueMicrotask) */ +declare function queueMicrotask(task: Function): void; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/structuredClone) */ +declare function structuredClone(value: T, options?: StructuredSerializeOptions): T; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/reportError) */ +declare function reportError(error: any): void; +/* [MDN 
Reference](https://developer.mozilla.org/docs/Web/API/Window/fetch) */ +declare function fetch(input: RequestInfo | URL, init?: RequestInit): Promise; +declare const self: ServiceWorkerGlobalScope; +/** +* The Web Crypto API provides a set of low-level functions for common cryptographic tasks. +* The Workers runtime implements the full surface of this API, but with some differences in +* the [supported algorithms](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/#supported-algorithms) +* compared to those implemented in most browsers. +* +* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/) +*/ +declare const crypto: Crypto; +/** +* The Cache API allows fine grained control of reading and writing from the Cloudflare global network cache. +* +* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/) +*/ +declare const caches: CacheStorage; +declare const scheduler: Scheduler; +/** +* The Workers runtime supports a subset of the Performance API, used to measure timing and performance, +* as well as timing of subrequests and other operations. 
+* +* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/) +*/ +declare const performance: Performance; +declare const Cloudflare: Cloudflare; +declare const origin: string; +declare const navigator: Navigator; +interface TestController { +} +interface ExecutionContext { + waitUntil(promise: Promise): void; + passThroughOnException(): void; + readonly exports: Cloudflare.Exports; + readonly props: Props; + cache?: CacheContext; + tracing?: Tracing; +} +type ExportedHandlerFetchHandler = (request: Request>, env: Env, ctx: ExecutionContext) => Response | Promise; +type ExportedHandlerConnectHandler = (socket: Socket, env: Env, ctx: ExecutionContext) => void | Promise; +type ExportedHandlerTailHandler = (events: TraceItem[], env: Env, ctx: ExecutionContext) => void | Promise; +type ExportedHandlerTraceHandler = (traces: TraceItem[], env: Env, ctx: ExecutionContext) => void | Promise; +type ExportedHandlerTailStreamHandler = (event: TailStream.TailEvent, env: Env, ctx: ExecutionContext) => TailStream.TailEventHandlerType | Promise; +type ExportedHandlerScheduledHandler = (controller: ScheduledController, env: Env, ctx: ExecutionContext) => void | Promise; +type ExportedHandlerQueueHandler = (batch: MessageBatch, env: Env, ctx: ExecutionContext) => void | Promise; +type ExportedHandlerTestHandler = (controller: TestController, env: Env, ctx: ExecutionContext) => void | Promise; +interface ExportedHandler { + fetch?: ExportedHandlerFetchHandler; + connect?: ExportedHandlerConnectHandler; + tail?: ExportedHandlerTailHandler; + trace?: ExportedHandlerTraceHandler; + tailStream?: ExportedHandlerTailStreamHandler; + scheduled?: ExportedHandlerScheduledHandler; + test?: ExportedHandlerTestHandler; + email?: EmailExportedHandler; + queue?: ExportedHandlerQueueHandler; +} +interface StructuredSerializeOptions { + transfer?: any[]; +} +declare abstract class Navigator { + sendBeacon(url: string, body?: BodyInit): boolean; + readonly 
userAgent: string; + readonly hardwareConcurrency: number; + readonly platform: string; + readonly language: string; + readonly languages: string[]; +} +interface AlarmInvocationInfo { + readonly isRetry: boolean; + readonly retryCount: number; + readonly scheduledTime: number; +} +interface Cloudflare { + readonly compatibilityFlags: Record; +} +interface CachePurgeError { + code: number; + message: string; +} +interface CachePurgeResult { + success: boolean; + errors: CachePurgeError[]; +} +interface CachePurgeOptions { + tags?: string[]; + pathPrefixes?: string[]; + purgeEverything?: boolean; +} +interface CacheContext { + purge(options: CachePurgeOptions): Promise; +} +declare abstract class ColoLocalActorNamespace { + get(actorId: string): Fetcher; +} +interface DurableObject { + fetch(request: Request): Response | Promise; + connect?(socket: Socket): void | Promise; + alarm?(alarmInfo?: AlarmInvocationInfo): void | Promise; + webSocketMessage?(ws: WebSocket, message: string | ArrayBuffer): void | Promise; + webSocketClose?(ws: WebSocket, code: number, reason: string, wasClean: boolean): void | Promise; + webSocketError?(ws: WebSocket, error: unknown): void | Promise; +} +type DurableObjectStub = Fetcher & { + readonly id: DurableObjectId; + readonly name?: string; +}; +interface DurableObjectId { + toString(): string; + equals(other: DurableObjectId): boolean; + readonly name?: string; + readonly jurisdiction?: string; +} +declare abstract class DurableObjectNamespace { + newUniqueId(options?: DurableObjectNamespaceNewUniqueIdOptions): DurableObjectId; + idFromName(name: string): DurableObjectId; + idFromString(id: string): DurableObjectId; + get(id: DurableObjectId, options?: DurableObjectNamespaceGetDurableObjectOptions): DurableObjectStub; + getByName(name: string, options?: DurableObjectNamespaceGetDurableObjectOptions): DurableObjectStub; + jurisdiction(jurisdiction: DurableObjectJurisdiction): DurableObjectNamespace; +} +type DurableObjectJurisdiction = 
"eu" | "fedramp" | "fedramp-high"; +interface DurableObjectNamespaceNewUniqueIdOptions { + jurisdiction?: DurableObjectJurisdiction; +} +type DurableObjectLocationHint = "wnam" | "enam" | "sam" | "weur" | "eeur" | "apac" | "oc" | "afr" | "me"; +type DurableObjectRoutingMode = "primary-only"; +interface DurableObjectNamespaceGetDurableObjectOptions { + locationHint?: DurableObjectLocationHint; + routingMode?: DurableObjectRoutingMode; +} +interface DurableObjectClass<_T extends Rpc.DurableObjectBranded | undefined = undefined> { +} +interface DurableObjectState { + waitUntil(promise: Promise): void; + readonly exports: Cloudflare.Exports; + readonly props: Props; + readonly id: DurableObjectId; + readonly storage: DurableObjectStorage; + container?: Container; + facets: DurableObjectFacets; + blockConcurrencyWhile(callback: () => Promise): Promise; + acceptWebSocket(ws: WebSocket, tags?: string[]): void; + getWebSockets(tag?: string): WebSocket[]; + setWebSocketAutoResponse(maybeReqResp?: WebSocketRequestResponsePair): void; + getWebSocketAutoResponse(): WebSocketRequestResponsePair | null; + getWebSocketAutoResponseTimestamp(ws: WebSocket): Date | null; + setHibernatableWebSocketEventTimeout(timeoutMs?: number): void; + getHibernatableWebSocketEventTimeout(): number | null; + getTags(ws: WebSocket): string[]; + abort(reason?: string): void; +} +interface DurableObjectTransaction { + get(key: string, options?: DurableObjectGetOptions): Promise; + get(keys: string[], options?: DurableObjectGetOptions): Promise>; + list(options?: DurableObjectListOptions): Promise>; + put(key: string, value: T, options?: DurableObjectPutOptions): Promise; + put(entries: Record, options?: DurableObjectPutOptions): Promise; + delete(key: string, options?: DurableObjectPutOptions): Promise; + delete(keys: string[], options?: DurableObjectPutOptions): Promise; + rollback(): void; + getAlarm(options?: DurableObjectGetAlarmOptions): Promise; + setAlarm(scheduledTime: number | Date, 
options?: DurableObjectSetAlarmOptions): Promise; + deleteAlarm(options?: DurableObjectSetAlarmOptions): Promise; +} +interface DurableObjectStorage { + get(key: string, options?: DurableObjectGetOptions): Promise; + get(keys: string[], options?: DurableObjectGetOptions): Promise>; + list(options?: DurableObjectListOptions): Promise>; + put(key: string, value: T, options?: DurableObjectPutOptions): Promise; + put(entries: Record, options?: DurableObjectPutOptions): Promise; + delete(key: string, options?: DurableObjectPutOptions): Promise; + delete(keys: string[], options?: DurableObjectPutOptions): Promise; + deleteAll(options?: DurableObjectPutOptions): Promise; + transaction(closure: (txn: DurableObjectTransaction) => Promise): Promise; + getAlarm(options?: DurableObjectGetAlarmOptions): Promise; + setAlarm(scheduledTime: number | Date, options?: DurableObjectSetAlarmOptions): Promise; + deleteAlarm(options?: DurableObjectSetAlarmOptions): Promise; + sync(): Promise; + sql: SqlStorage; + kv: SyncKvStorage; + transactionSync(closure: () => T): T; + getCurrentBookmark(): Promise; + getBookmarkForTime(timestamp: number | Date): Promise; + onNextSessionRestoreBookmark(bookmark: string): Promise; +} +interface DurableObjectListOptions { + start?: string; + startAfter?: string; + end?: string; + prefix?: string; + reverse?: boolean; + limit?: number; + allowConcurrency?: boolean; + noCache?: boolean; +} +interface DurableObjectGetOptions { + allowConcurrency?: boolean; + noCache?: boolean; +} +interface DurableObjectGetAlarmOptions { + allowConcurrency?: boolean; +} +interface DurableObjectPutOptions { + allowConcurrency?: boolean; + allowUnconfirmed?: boolean; + noCache?: boolean; +} +interface DurableObjectSetAlarmOptions { + allowConcurrency?: boolean; + allowUnconfirmed?: boolean; +} +declare class WebSocketRequestResponsePair { + constructor(request: string, response: string); + get request(): string; + get response(): string; +} +interface DurableObjectFacets { 
+ get(name: string, getStartupOptions: () => FacetStartupOptions | Promise>): Fetcher; + abort(name: string, reason: any): void; + delete(name: string): void; +} +interface FacetStartupOptions { + id?: DurableObjectId | string; + class: DurableObjectClass; +} +interface AnalyticsEngineDataset { + writeDataPoint(event?: AnalyticsEngineDataPoint): void; +} +interface AnalyticsEngineDataPoint { + indexes?: ((ArrayBuffer | string) | null)[]; + doubles?: number[]; + blobs?: ((ArrayBuffer | string) | null)[]; +} +/** + * The **`Event`** interface represents an event which takes place on an `EventTarget`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event) + */ +declare class Event { + constructor(type: string, init?: EventInit); + /** + * The **`type`** read-only property of the Event interface returns a string containing the event's type. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/type) + */ + get type(): string; + /** + * The **`eventPhase`** read-only property of the being evaluated. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/eventPhase) + */ + get eventPhase(): number; + /** + * The read-only **`composed`** property of the or not the event will propagate across the shadow DOM boundary into the standard DOM. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/composed) + */ + get composed(): boolean; + /** + * The **`bubbles`** read-only property of the Event interface indicates whether the event bubbles up through the DOM tree or not. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/bubbles) + */ + get bubbles(): boolean; + /** + * The **`cancelable`** read-only property of the Event interface indicates whether the event can be canceled, and therefore prevented as if the event never happened. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelable) + */ + get cancelable(): boolean; + /** + * The **`defaultPrevented`** read-only property of the Event interface returns a boolean value indicating whether or not the call to Event.preventDefault() canceled the event. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/defaultPrevented) + */ + get defaultPrevented(): boolean; + /** + * The Event property **`returnValue`** indicates whether the default action for this event has been prevented or not. + * @deprecated + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/returnValue) + */ + get returnValue(): boolean; + /** + * The **`currentTarget`** read-only property of the Event interface identifies the element to which the event handler has been attached. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/currentTarget) + */ + get currentTarget(): EventTarget | undefined; + /** + * The read-only **`target`** property of the dispatched. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/target) + */ + get target(): EventTarget | undefined; + /** + * The deprecated **`Event.srcElement`** is an alias for the Event.target property. + * @deprecated + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/srcElement) + */ + get srcElement(): EventTarget | undefined; + /** + * The **`timeStamp`** read-only property of the Event interface returns the time (in milliseconds) at which the event was created. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/timeStamp) + */ + get timeStamp(): number; + /** + * The **`isTrusted`** read-only property of the when the event was generated by the user agent (including via user actions and programmatic methods such as HTMLElement.focus()), and `false` when the event was dispatched via The only exception is the `click` event, which initializes the `isTrusted` property to `false` in user agents. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/isTrusted) + */ + get isTrusted(): boolean; + /** + * The **`cancelBubble`** property of the Event interface is deprecated. + * @deprecated + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelBubble) + */ + get cancelBubble(): boolean; + /** + * The **`cancelBubble`** property of the Event interface is deprecated. + * @deprecated + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelBubble) + */ + set cancelBubble(value: boolean); + /** + * The **`stopImmediatePropagation()`** method of the If several listeners are attached to the same element for the same event type, they are called in the order in which they were added. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/stopImmediatePropagation) + */ + stopImmediatePropagation(): void; + /** + * The **`preventDefault()`** method of the Event interface tells the user agent that if the event does not get explicitly handled, its default action should not be taken as it normally would be. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/preventDefault) + */ + preventDefault(): void; + /** + * The **`stopPropagation()`** method of the Event interface prevents further propagation of the current event in the capturing and bubbling phases. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/stopPropagation) + */ + stopPropagation(): void; + /** + * The **`composedPath()`** method of the Event interface returns the event's path which is an array of the objects on which listeners will be invoked. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/composedPath) + */ + composedPath(): EventTarget[]; + static readonly NONE: number; + static readonly CAPTURING_PHASE: number; + static readonly AT_TARGET: number; + static readonly BUBBLING_PHASE: number; +} +interface EventInit { + bubbles?: boolean; + cancelable?: boolean; + composed?: boolean; +} +type EventListener = (event: EventType) => void; +interface EventListenerObject { + handleEvent(event: EventType): void; +} +type EventListenerOrEventListenerObject = EventListener | EventListenerObject; +/** + * The **`EventTarget`** interface is implemented by objects that can receive events and may have listeners for them. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget) + */ +declare class EventTarget = Record> { + constructor(); + /** + * The **`addEventListener()`** method of the EventTarget interface sets up a function that will be called whenever the specified event is delivered to the target. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/addEventListener) + */ + addEventListener(type: Type, handler: EventListenerOrEventListenerObject, options?: EventTargetAddEventListenerOptions | boolean): void; + /** + * The **`removeEventListener()`** method of the EventTarget interface removes an event listener previously registered with EventTarget.addEventListener() from the target. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/removeEventListener) + */ + removeEventListener(type: Type, handler: EventListenerOrEventListenerObject, options?: EventTargetEventListenerOptions | boolean): void; + /** + * The **`dispatchEvent()`** method of the EventTarget sends an Event to the object, (synchronously) invoking the affected event listeners in the appropriate order. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/dispatchEvent) + */ + dispatchEvent(event: EventMap[keyof EventMap]): boolean; +} +interface EventTargetEventListenerOptions { + capture?: boolean; +} +interface EventTargetAddEventListenerOptions { + capture?: boolean; + passive?: boolean; + once?: boolean; + signal?: AbortSignal; +} +interface EventTargetHandlerObject { + handleEvent: (event: Event) => any | undefined; +} +/** + * The **`AbortController`** interface represents a controller object that allows you to abort one or more Web requests as and when desired. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController) + */ +declare class AbortController { + constructor(); + /** + * The **`signal`** read-only property of the AbortController interface returns an AbortSignal object instance, which can be used to communicate with/abort an asynchronous operation as desired. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController/signal) + */ + get signal(): AbortSignal; + /** + * The **`abort()`** method of the AbortController interface aborts an asynchronous operation before it has completed. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController/abort) + */ + abort(reason?: any): void; +} +/** + * The **`AbortSignal`** interface represents a signal object that allows you to communicate with an asynchronous operation (such as a fetch request) and abort it if required via an AbortController object. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal) + */ +declare abstract class AbortSignal extends EventTarget { + /** + * The **`AbortSignal.abort()`** static method returns an AbortSignal that is already set as aborted (and which does not trigger an AbortSignal/abort_event event). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_static) + */ + static abort(reason?: any): AbortSignal; + /** + * The **`AbortSignal.timeout()`** static method returns an AbortSignal that will automatically abort after a specified time. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/timeout_static) + */ + static timeout(delay: number): AbortSignal; + /** + * The **`AbortSignal.any()`** static method takes an iterable of abort signals and returns an AbortSignal. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/any_static) + */ + static any(signals: AbortSignal[]): AbortSignal; + /** + * The **`aborted`** read-only property returns a value that indicates whether the asynchronous operations the signal is communicating with are aborted (`true`) or not (`false`). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/aborted) + */ + get aborted(): boolean; + /** + * The **`reason`** read-only property returns a JavaScript value that indicates the abort reason. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/reason) + */ + get reason(): any; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_event) */ + get onabort(): any | null; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_event) */ + set onabort(value: any | null); + /** + * The **`throwIfAborted()`** method throws the signal's abort AbortSignal.reason if the signal has been aborted; otherwise it does nothing. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/throwIfAborted) + */ + throwIfAborted(): void; +} +interface Scheduler { + wait(delay: number, maybeOptions?: SchedulerWaitOptions): Promise; +} +interface SchedulerWaitOptions { + signal?: AbortSignal; +} +/** + * The **`ExtendableEvent`** interface extends the lifetime of the `install` and `activate` events dispatched on the global scope as part of the service worker lifecycle. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ExtendableEvent) + */ +declare abstract class ExtendableEvent extends Event { + /** + * The **`ExtendableEvent.waitUntil()`** method tells the event dispatcher that work is ongoing. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ExtendableEvent/waitUntil) + */ + waitUntil(promise: Promise): void; +} +/** + * The **`CustomEvent`** interface represents events initialized by an application for any purpose. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CustomEvent) + */ +declare class CustomEvent extends Event { + constructor(type: string, init?: CustomEventCustomEventInit); + /** + * The read-only **`detail`** property of the CustomEvent interface returns any data passed when initializing the event. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CustomEvent/detail) + */ + get detail(): T; +} +interface CustomEventCustomEventInit { + bubbles?: boolean; + cancelable?: boolean; + composed?: boolean; + detail?: any; +} +/** + * The **`Blob`** interface represents a blob, which is a file-like object of immutable, raw data; they can be read as text or binary data, or converted into a ReadableStream so its methods can be used for processing the data. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob) + */ +declare class Blob { + constructor(bits?: ((ArrayBuffer | ArrayBufferView) | string | Blob)[], options?: BlobOptions); + /** + * The **`size`** read-only property of the Blob interface returns the size of the Blob or File in bytes. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/size) + */ + get size(): number; + /** + * The **`type`** read-only property of the Blob interface returns the MIME type of the file. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/type) + */ + get type(): string; + /** + * The **`slice()`** method of the Blob interface creates and returns a new `Blob` object which contains data from a subset of the blob on which it's called. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/slice) + */ + slice(start?: number, end?: number, type?: string): Blob; + /** + * The **`arrayBuffer()`** method of the Blob interface returns a Promise that resolves with the contents of the blob as binary data contained in an ArrayBuffer. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/arrayBuffer) + */ + arrayBuffer(): Promise; + /** + * The **`bytes()`** method of the Blob interface returns a Promise that resolves with a Uint8Array containing the contents of the blob as an array of bytes. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/bytes) + */ + bytes(): Promise; + /** + * The **`text()`** method of the string containing the contents of the blob, interpreted as UTF-8. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/text) + */ + text(): Promise; + /** + * The **`stream()`** method of the Blob interface returns a ReadableStream which upon reading returns the data contained within the `Blob`. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/stream) + */ + stream(): ReadableStream; +} +interface BlobOptions { + type?: string; +} +/** + * The **`File`** interface provides information about files and allows JavaScript in a web page to access their content. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File) + */ +declare class File extends Blob { + constructor(bits: ((ArrayBuffer | ArrayBufferView) | string | Blob)[] | undefined, name: string, options?: FileOptions); + /** + * The **`name`** read-only property of the File interface returns the name of the file represented by a File object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/name) + */ + get name(): string; + /** + * The **`lastModified`** read-only property of the File interface provides the last modified date of the file as the number of milliseconds since the Unix epoch (January 1, 1970 at midnight). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/lastModified) + */ + get lastModified(): number; +} +interface FileOptions { + type?: string; + lastModified?: number; +} +/** +* The Cache API allows fine grained control of reading and writing from the Cloudflare global network cache. +* +* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/) +*/ +declare abstract class CacheStorage { + /** + * The **`open()`** method of the the Cache object matching the `cacheName`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CacheStorage/open) + */ + open(cacheName: string): Promise; + readonly default: Cache; +} +/** +* The Cache API allows fine grained control of reading and writing from the Cloudflare global network cache. 
+* +* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/) +*/ +declare abstract class Cache { + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/#delete) */ + delete(request: RequestInfo | URL, options?: CacheQueryOptions): Promise; + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/#match) */ + match(request: RequestInfo | URL, options?: CacheQueryOptions): Promise; + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/#put) */ + put(request: RequestInfo | URL, response: Response): Promise; +} +interface CacheQueryOptions { + ignoreMethod?: boolean; +} +/** +* The Web Crypto API provides a set of low-level functions for common cryptographic tasks. +* The Workers runtime implements the full surface of this API, but with some differences in +* the [supported algorithms](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/#supported-algorithms) +* compared to those implemented in most browsers. +* +* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/) +*/ +declare abstract class Crypto { + /** + * The **`Crypto.subtle`** read-only property returns a cryptographic operations. + * Available only in secure contexts. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/subtle) + */ + get subtle(): SubtleCrypto; + /** + * The **`Crypto.getRandomValues()`** method lets you get cryptographically strong random values. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/getRandomValues) + */ + getRandomValues(buffer: T): T; + /** + * The **`randomUUID()`** method of the Crypto interface is used to generate a v4 UUID using a cryptographically secure random number generator. + * Available only in secure contexts. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/randomUUID) + */ + randomUUID(): string; + DigestStream: typeof DigestStream; +} +/** + * The **`SubtleCrypto`** interface of the Web Crypto API provides a number of low-level cryptographic functions. + * Available only in secure contexts. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto) + */ +declare abstract class SubtleCrypto { + /** + * The **`encrypt()`** method of the SubtleCrypto interface encrypts data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/encrypt) + */ + encrypt(algorithm: string | SubtleCryptoEncryptAlgorithm, key: CryptoKey, plainText: ArrayBuffer | ArrayBufferView): Promise; + /** + * The **`decrypt()`** method of the SubtleCrypto interface decrypts some encrypted data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/decrypt) + */ + decrypt(algorithm: string | SubtleCryptoEncryptAlgorithm, key: CryptoKey, cipherText: ArrayBuffer | ArrayBufferView): Promise; + /** + * The **`sign()`** method of the SubtleCrypto interface generates a digital signature. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/sign) + */ + sign(algorithm: string | SubtleCryptoSignAlgorithm, key: CryptoKey, data: ArrayBuffer | ArrayBufferView): Promise; + /** + * The **`verify()`** method of the SubtleCrypto interface verifies a digital signature. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/verify) + */ + verify(algorithm: string | SubtleCryptoSignAlgorithm, key: CryptoKey, signature: ArrayBuffer | ArrayBufferView, data: ArrayBuffer | ArrayBufferView): Promise; + /** + * The **`digest()`** method of the SubtleCrypto interface generates a _digest_ of the given data, using the specified hash function. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/digest) + */ + digest(algorithm: string | SubtleCryptoHashAlgorithm, data: ArrayBuffer | ArrayBufferView): Promise; + /** + * The **`generateKey()`** method of the SubtleCrypto interface is used to generate a new key (for symmetric algorithms) or key pair (for public-key algorithms). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/generateKey) + */ + generateKey(algorithm: string | SubtleCryptoGenerateKeyAlgorithm, extractable: boolean, keyUsages: string[]): Promise; + /** + * The **`deriveKey()`** method of the SubtleCrypto interface can be used to derive a secret key from a master key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/deriveKey) + */ + deriveKey(algorithm: string | SubtleCryptoDeriveKeyAlgorithm, baseKey: CryptoKey, derivedKeyAlgorithm: string | SubtleCryptoImportKeyAlgorithm, extractable: boolean, keyUsages: string[]): Promise; + /** + * The **`deriveBits()`** method of the key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/deriveBits) + */ + deriveBits(algorithm: string | SubtleCryptoDeriveKeyAlgorithm, baseKey: CryptoKey, length?: number | null): Promise; + /** + * The **`importKey()`** method of the SubtleCrypto interface imports a key: that is, it takes as input a key in an external, portable format and gives you a CryptoKey object that you can use in the Web Crypto API. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/importKey) + */ + importKey(format: string, keyData: (ArrayBuffer | ArrayBufferView) | JsonWebKey, algorithm: string | SubtleCryptoImportKeyAlgorithm, extractable: boolean, keyUsages: string[]): Promise; + /** + * The **`exportKey()`** method of the SubtleCrypto interface exports a key: that is, it takes as input a CryptoKey object and gives you the key in an external, portable format. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/exportKey) + */ + exportKey(format: string, key: CryptoKey): Promise; + /** + * The **`wrapKey()`** method of the SubtleCrypto interface 'wraps' a key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/wrapKey) + */ + wrapKey(format: string, key: CryptoKey, wrappingKey: CryptoKey, wrapAlgorithm: string | SubtleCryptoEncryptAlgorithm): Promise; + /** + * The **`unwrapKey()`** method of the SubtleCrypto interface 'unwraps' a key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/unwrapKey) + */ + unwrapKey(format: string, wrappedKey: ArrayBuffer | ArrayBufferView, unwrappingKey: CryptoKey, unwrapAlgorithm: string | SubtleCryptoEncryptAlgorithm, unwrappedKeyAlgorithm: string | SubtleCryptoImportKeyAlgorithm, extractable: boolean, keyUsages: string[]): Promise; + timingSafeEqual(a: ArrayBuffer | ArrayBufferView, b: ArrayBuffer | ArrayBufferView): boolean; +} +/** + * The **`CryptoKey`** interface of the Web Crypto API represents a cryptographic key obtained from one of the SubtleCrypto methods SubtleCrypto.generateKey, SubtleCrypto.deriveKey, SubtleCrypto.importKey, or SubtleCrypto.unwrapKey. + * Available only in secure contexts. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey) + */ +declare abstract class CryptoKey { + /** + * The read-only **`type`** property of the CryptoKey interface indicates which kind of key is represented by the object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/type) + */ + readonly type: string; + /** + * The read-only **`extractable`** property of the CryptoKey interface indicates whether or not the key may be extracted using `SubtleCrypto.exportKey()` or `SubtleCrypto.wrapKey()`. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/extractable) + */ + readonly extractable: boolean; + /** + * The read-only **`algorithm`** property of the CryptoKey interface returns an object describing the algorithm for which this key can be used, and any associated extra parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/algorithm) + */ + readonly algorithm: CryptoKeyKeyAlgorithm | CryptoKeyAesKeyAlgorithm | CryptoKeyHmacKeyAlgorithm | CryptoKeyRsaKeyAlgorithm | CryptoKeyEllipticKeyAlgorithm | CryptoKeyArbitraryKeyAlgorithm; + /** + * The read-only **`usages`** property of the CryptoKey interface indicates what can be done with the key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/usages) + */ + readonly usages: string[]; +} +interface CryptoKeyPair { + publicKey: CryptoKey; + privateKey: CryptoKey; +} +interface JsonWebKey { + kty: string; + use?: string; + key_ops?: string[]; + alg?: string; + ext?: boolean; + crv?: string; + x?: string; + y?: string; + d?: string; + n?: string; + e?: string; + p?: string; + q?: string; + dp?: string; + dq?: string; + qi?: string; + oth?: RsaOtherPrimesInfo[]; + k?: string; +} +interface RsaOtherPrimesInfo { + r?: string; + d?: string; + t?: string; +} +interface SubtleCryptoDeriveKeyAlgorithm { + name: string; + salt?: (ArrayBuffer | ArrayBufferView); + iterations?: number; + hash?: (string | SubtleCryptoHashAlgorithm); + $public?: CryptoKey; + info?: (ArrayBuffer | ArrayBufferView); +} +interface SubtleCryptoEncryptAlgorithm { + name: string; + iv?: (ArrayBuffer | ArrayBufferView); + additionalData?: (ArrayBuffer | ArrayBufferView); + tagLength?: number; + counter?: (ArrayBuffer | ArrayBufferView); + length?: number; + label?: (ArrayBuffer | ArrayBufferView); +} +interface SubtleCryptoGenerateKeyAlgorithm { + name: string; + hash?: (string | SubtleCryptoHashAlgorithm); + modulusLength?: number; + publicExponent?: (ArrayBuffer | 
ArrayBufferView); + length?: number; + namedCurve?: string; +} +interface SubtleCryptoHashAlgorithm { + name: string; +} +interface SubtleCryptoImportKeyAlgorithm { + name: string; + hash?: (string | SubtleCryptoHashAlgorithm); + length?: number; + namedCurve?: string; + compressed?: boolean; +} +interface SubtleCryptoSignAlgorithm { + name: string; + hash?: (string | SubtleCryptoHashAlgorithm); + dataLength?: number; + saltLength?: number; +} +interface CryptoKeyKeyAlgorithm { + name: string; +} +interface CryptoKeyAesKeyAlgorithm { + name: string; + length: number; +} +interface CryptoKeyHmacKeyAlgorithm { + name: string; + hash: CryptoKeyKeyAlgorithm; + length: number; +} +interface CryptoKeyRsaKeyAlgorithm { + name: string; + modulusLength: number; + publicExponent: ArrayBuffer | ArrayBufferView; + hash?: CryptoKeyKeyAlgorithm; +} +interface CryptoKeyEllipticKeyAlgorithm { + name: string; + namedCurve: string; +} +interface CryptoKeyArbitraryKeyAlgorithm { + name: string; + hash?: CryptoKeyKeyAlgorithm; + namedCurve?: string; + length?: number; +} +declare class DigestStream extends WritableStream { + constructor(algorithm: string | SubtleCryptoHashAlgorithm); + readonly digest: Promise; + get bytesWritten(): number | bigint; +} +/** + * The **`TextDecoder`** interface represents a decoder for a specific text encoding, such as `UTF-8`, `ISO-8859-2`, `KOI8-R`, `GBK`, etc. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoder) + */ +declare class TextDecoder { + constructor(label?: string, options?: TextDecoderConstructorOptions); + /** + * The **`TextDecoder.decode()`** method returns a string containing text decoded from the buffer passed as a parameter. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoder/decode) + */ + decode(input?: (ArrayBuffer | ArrayBufferView), options?: TextDecoderDecodeOptions): string; + get encoding(): string; + get fatal(): boolean; + get ignoreBOM(): boolean; +} +/** + * The **`TextEncoder`** interface takes a stream of code points as input and emits a stream of UTF-8 bytes. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder) + */ +declare class TextEncoder { + constructor(); + /** + * The **`TextEncoder.encode()`** method takes a string as input, and returns a Global_Objects/Uint8Array containing the text given in parameters encoded with the specific method for that TextEncoder object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder/encode) + */ + encode(input?: string): Uint8Array; + /** + * The **`TextEncoder.encodeInto()`** method takes a string to encode and a destination Uint8Array to put resulting UTF-8 encoded text into, and returns a dictionary object indicating the progress of the encoding. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder/encodeInto) + */ + encodeInto(input: string, buffer: Uint8Array): TextEncoderEncodeIntoResult; + get encoding(): string; +} +interface TextDecoderConstructorOptions { + fatal: boolean; + ignoreBOM: boolean; +} +interface TextDecoderDecodeOptions { + stream: boolean; +} +interface TextEncoderEncodeIntoResult { + read: number; + written: number; +} +/** + * The **`ErrorEvent`** interface represents events providing information related to errors in scripts or in files. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent) + */ +declare class ErrorEvent extends Event { + constructor(type: string, init?: ErrorEventErrorEventInit); + /** + * The **`filename`** read-only property of the ErrorEvent interface returns a string containing the name of the script file in which the error occurred. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/filename) + */ + get filename(): string; + /** + * The **`message`** read-only property of the ErrorEvent interface returns a string containing a human-readable error message describing the problem. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/message) + */ + get message(): string; + /** + * The **`lineno`** read-only property of the ErrorEvent interface returns an integer containing the line number of the script file on which the error occurred. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/lineno) + */ + get lineno(): number; + /** + * The **`colno`** read-only property of the ErrorEvent interface returns an integer containing the column number of the script file on which the error occurred. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/colno) + */ + get colno(): number; + /** + * The **`error`** read-only property of the ErrorEvent interface returns a JavaScript value, such as an Error or DOMException, representing the error associated with this event. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/error) + */ + get error(): any; +} +interface ErrorEventErrorEventInit { + message?: string; + filename?: string; + lineno?: number; + colno?: number; + error?: any; +} +/** + * The **`MessageEvent`** interface represents a message received by a target object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent) + */ +declare class MessageEvent extends Event { + constructor(type: string, initializer: MessageEventInit); + /** + * The **`data`** read-only property of the The data sent by the message emitter; this can be any data type, depending on what originated this event. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/data) + */ + readonly data: any; + /** + * The **`origin`** read-only property of the origin of the message emitter. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/origin) + */ + readonly origin: string | null; + /** + * The **`lastEventId`** read-only property of the unique ID for the event. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/lastEventId) + */ + readonly lastEventId: string; + /** + * The **`source`** read-only property of the a WindowProxy, MessagePort, or a `MessageEventSource` (which can be a WindowProxy, message emitter. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/source) + */ + readonly source: MessagePort | null; + /** + * The **`ports`** read-only property of the containing all MessagePort objects sent with the message, in order. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/ports) + */ + readonly ports: MessagePort[]; +} +interface MessageEventInit { + data: ArrayBuffer | string; +} +/** + * The **`PromiseRejectionEvent`** interface represents events which are sent to the global script context when JavaScript Promises are rejected. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent) + */ +declare abstract class PromiseRejectionEvent extends Event { + /** + * The PromiseRejectionEvent interface's **`promise`** read-only property indicates the JavaScript rejected. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent/promise) + */ + readonly promise: Promise; + /** + * The PromiseRejectionEvent **`reason`** read-only property is any JavaScript value or Object which provides the reason passed into Promise.reject(). 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent/reason) + */ + readonly reason: any; +} +/** + * The **`FormData`** interface provides a way to construct a set of key/value pairs representing form fields and their values, which can be sent using the Window/fetch, XMLHttpRequest.send() or navigator.sendBeacon() methods. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData) + */ +declare class FormData { + constructor(); + /** + * The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append) + */ + append(name: string, value: string | Blob): void; + /** + * The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append) + */ + append(name: string, value: string): void; + /** + * The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append) + */ + append(name: string, value: Blob, filename?: string): void; + /** + * The **`delete()`** method of the FormData interface deletes a key and its value(s) from a `FormData` object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/delete) + */ + delete(name: string): void; + /** + * The **`get()`** method of the FormData interface returns the first value associated with a given key from within a `FormData` object. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/get) + */ + get(name: string): (File | string) | null; + /** + * The **`getAll()`** method of the FormData interface returns all the values associated with a given key from within a `FormData` object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/getAll) + */ + getAll(name: string): (File | string)[]; + /** + * The **`has()`** method of the FormData interface returns whether a `FormData` object contains a certain key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/has) + */ + has(name: string): boolean; + /** + * The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set) + */ + set(name: string, value: string | Blob): void; + /** + * The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set) + */ + set(name: string, value: string): void; + /** + * The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set) + */ + set(name: string, value: Blob, filename?: string): void; + /* Returns an array of key, value pairs for every entry in the list. */ + entries(): IterableIterator<[ + key: string, + value: File | string + ]>; + /* Returns a list of keys in the list. */ + keys(): IterableIterator; + /* Returns a list of values in the list. 
*/ + values(): IterableIterator<(File | string)>; + forEach(callback: (this: This, value: File | string, key: string, parent: FormData) => void, thisArg?: This): void; + [Symbol.iterator](): IterableIterator<[ + key: string, + value: File | string + ]>; +} +interface ContentOptions { + html?: boolean; +} +declare class HTMLRewriter { + constructor(); + on(selector: string, handlers: HTMLRewriterElementContentHandlers): HTMLRewriter; + onDocument(handlers: HTMLRewriterDocumentContentHandlers): HTMLRewriter; + transform(response: Response): Response; +} +interface HTMLRewriterElementContentHandlers { + element?(element: Element): void | Promise; + comments?(comment: Comment): void | Promise; + text?(element: Text): void | Promise; +} +interface HTMLRewriterDocumentContentHandlers { + doctype?(doctype: Doctype): void | Promise; + comments?(comment: Comment): void | Promise; + text?(text: Text): void | Promise; + end?(end: DocumentEnd): void | Promise; +} +interface Doctype { + readonly name: string | null; + readonly publicId: string | null; + readonly systemId: string | null; +} +interface Element { + tagName: string; + readonly attributes: IterableIterator; + readonly removed: boolean; + readonly namespaceURI: string; + getAttribute(name: string): string | null; + hasAttribute(name: string): boolean; + setAttribute(name: string, value: string): Element; + removeAttribute(name: string): Element; + before(content: string | ReadableStream | Response, options?: ContentOptions): Element; + after(content: string | ReadableStream | Response, options?: ContentOptions): Element; + prepend(content: string | ReadableStream | Response, options?: ContentOptions): Element; + append(content: string | ReadableStream | Response, options?: ContentOptions): Element; + replace(content: string | ReadableStream | Response, options?: ContentOptions): Element; + remove(): Element; + removeAndKeepContent(): Element; + setInnerContent(content: string | ReadableStream | Response, options?: 
ContentOptions): Element; + onEndTag(handler: (tag: EndTag) => void | Promise): void; +} +interface EndTag { + name: string; + before(content: string | ReadableStream | Response, options?: ContentOptions): EndTag; + after(content: string | ReadableStream | Response, options?: ContentOptions): EndTag; + remove(): EndTag; +} +interface Comment { + text: string; + readonly removed: boolean; + before(content: string, options?: ContentOptions): Comment; + after(content: string, options?: ContentOptions): Comment; + replace(content: string, options?: ContentOptions): Comment; + remove(): Comment; +} +interface Text { + readonly text: string; + readonly lastInTextNode: boolean; + readonly removed: boolean; + before(content: string | ReadableStream | Response, options?: ContentOptions): Text; + after(content: string | ReadableStream | Response, options?: ContentOptions): Text; + replace(content: string | ReadableStream | Response, options?: ContentOptions): Text; + remove(): Text; +} +interface DocumentEnd { + append(content: string, options?: ContentOptions): DocumentEnd; +} +/** + * This is the event type for `fetch` events dispatched on the ServiceWorkerGlobalScope. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent) + */ +declare abstract class FetchEvent extends ExtendableEvent { + /** + * The **`request`** read-only property of the the event handler. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent/request) + */ + readonly request: Request; + /** + * The **`respondWith()`** method of allows you to provide a promise for a Response yourself. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent/respondWith) + */ + respondWith(promise: Response | Promise): void; + passThroughOnException(): void; +} +type HeadersInit = Headers | Iterable> | Record; +/** + * The **`Headers`** interface of the Fetch API allows you to perform various actions on HTTP request and response headers. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers) + */ +declare class Headers { + constructor(init?: HeadersInit); + /** + * The **`get()`** method of the Headers interface returns a byte string of all the values of a header within a `Headers` object with a given name. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/get) + */ + get(name: string): string | null; + getAll(name: string): string[]; + /** + * The **`getSetCookie()`** method of the Headers interface returns an array containing the values of all Set-Cookie headers associated with a response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/getSetCookie) + */ + getSetCookie(): string[]; + /** + * The **`has()`** method of the Headers interface returns a boolean stating whether a `Headers` object contains a certain header. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/has) + */ + has(name: string): boolean; + /** + * The **`set()`** method of the Headers interface sets a new value for an existing header inside a `Headers` object, or adds the header if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/set) + */ + set(name: string, value: string): void; + /** + * The **`append()`** method of the Headers interface appends a new value onto an existing header inside a `Headers` object, or adds the header if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/append) + */ + append(name: string, value: string): void; + /** + * The **`delete()`** method of the Headers interface deletes a header from the current `Headers` object. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/delete) + */ + delete(name: string): void; + forEach(callback: (this: This, value: string, key: string, parent: Headers) => void, thisArg?: This): void; + /* Returns an iterator allowing to go through all key/value pairs contained in this object. */ + entries(): IterableIterator<[ + key: string, + value: string + ]>; + /* Returns an iterator allowing to go through all keys of the key/value pairs contained in this object. */ + keys(): IterableIterator; + /* Returns an iterator allowing to go through all values of the key/value pairs contained in this object. */ + values(): IterableIterator; + [Symbol.iterator](): IterableIterator<[ + key: string, + value: string + ]>; +} +type BodyInit = ReadableStream | string | ArrayBuffer | ArrayBufferView | Blob | URLSearchParams | FormData | Iterable | AsyncIterable; +declare abstract class Body { + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/body) */ + get body(): ReadableStream | null; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/bodyUsed) */ + get bodyUsed(): boolean; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/arrayBuffer) */ + arrayBuffer(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/bytes) */ + bytes(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/text) */ + text(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/json) */ + json(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/formData) */ + formData(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/blob) */ + blob(): Promise; +} +/** + * The **`Response`** interface of the Fetch API represents the response to a request. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response) + */ +declare var Response: { + prototype: Response; + new (body?: BodyInit | null, init?: ResponseInit): Response; + error(): Response; + redirect(url: string, status?: number): Response; + json(any: any, maybeInit?: (ResponseInit | Response)): Response; +}; +/** + * The **`Response`** interface of the Fetch API represents the response to a request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response) + */ +interface Response extends Body { + /** + * The **`clone()`** method of the Response interface creates a clone of a response object, identical in every way, but stored in a different variable. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/clone) + */ + clone(): Response; + /** + * The **`status`** read-only property of the Response interface contains the HTTP status codes of the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/status) + */ + status: number; + /** + * The **`statusText`** read-only property of the Response interface contains the status message corresponding to the HTTP status code in Response.status. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/statusText) + */ + statusText: string; + /** + * The **`headers`** read-only property of the with the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/headers) + */ + headers: Headers; + /** + * The **`ok`** read-only property of the Response interface contains a Boolean stating whether the response was successful (status in the range 200-299) or not. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/ok) + */ + ok: boolean; + /** + * The **`redirected`** read-only property of the Response interface indicates whether or not the response is the result of a request you made which was redirected. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/redirected) + */ + redirected: boolean; + /** + * The **`url`** read-only property of the Response interface contains the URL of the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/url) + */ + url: string; + webSocket: WebSocket | null; + cf: any | undefined; + /** + * The **`type`** read-only property of the Response interface contains the type of the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/type) + */ + type: "default" | "error"; +} +interface ResponseInit { + status?: number; + statusText?: string; + headers?: HeadersInit; + cf?: any; + webSocket?: (WebSocket | null); + encodeBody?: "automatic" | "manual"; +} +type RequestInfo> = Request | string; +/** + * The **`Request`** interface of the Fetch API represents a resource request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request) + */ +declare var Request: { + prototype: Request; + new >(input: RequestInfo | URL, init?: RequestInit): Request; +}; +/** + * The **`Request`** interface of the Fetch API represents a resource request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request) + */ +interface Request> extends Body { + /** + * The **`clone()`** method of the Request interface creates a copy of the current `Request` object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/clone) + */ + clone(): Request; + /** + * The **`method`** read-only property of the `POST`, etc.) A String indicating the method of the request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/method) + */ + method: string; + /** + * The **`url`** read-only property of the Request interface contains the URL of the request. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/url) + */ + url: string; + /** + * The **`headers`** read-only property of the with the request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/headers) + */ + headers: Headers; + /** + * The **`redirect`** read-only property of the Request interface contains the mode for how redirects are handled. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/redirect) + */ + redirect: string; + fetcher: Fetcher | null; + /** + * The read-only **`signal`** property of the Request interface returns the AbortSignal associated with the request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/signal) + */ + signal: AbortSignal; + cf?: Cf; + /** + * The **`integrity`** read-only property of the Request interface contains the subresource integrity value of the request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/integrity) + */ + integrity: string; + /** + * The **`keepalive`** read-only property of the Request interface contains the request's `keepalive` setting (`true` or `false`), which indicates whether the browser will keep the associated request alive if the page that initiated it is unloaded before the request is complete. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/keepalive) + */ + keepalive: boolean; + /** + * The **`cache`** read-only property of the Request interface contains the cache mode of the request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/cache) + */ + cache?: "no-store" | "no-cache"; +} +interface RequestInit { + /* A string to set request's method. */ + method?: string; + /* A Headers object, an object literal, or an array of two-item arrays to set request's headers. */ + headers?: HeadersInit; + /* A BodyInit object or null to set request's body. 
*/ + body?: BodyInit | null; + /* A string indicating whether request follows redirects, results in an error upon encountering a redirect, or returns the redirect (in an opaque fashion). Sets request's redirect. */ + redirect?: string; + fetcher?: (Fetcher | null); + cf?: Cf; + /* A string indicating how the request will interact with the browser's cache to set request's cache. */ + cache?: "no-store" | "no-cache"; + /* A cryptographic hash of the resource to be fetched by request. Sets request's integrity. */ + integrity?: string; + /* An AbortSignal to set request's signal. */ + signal?: (AbortSignal | null); + encodeResponseBody?: "automatic" | "manual"; +} +type Service Rpc.WorkerEntrypointBranded) | Rpc.WorkerEntrypointBranded | ExportedHandler | undefined = undefined> = T extends new (...args: any[]) => Rpc.WorkerEntrypointBranded ? Fetcher> : T extends Rpc.WorkerEntrypointBranded ? Fetcher : T extends Exclude ? never : Fetcher; +type Fetcher = (T extends Rpc.EntrypointBranded ? Rpc.Provider : unknown) & { + fetch(input: RequestInfo | URL, init?: RequestInit): Promise; + connect(address: SocketAddress | string, options?: SocketOptions): Socket; +}; +interface KVNamespaceListKey { + name: Key; + expiration?: number; + metadata?: Metadata; +} +type KVNamespaceListResult = { + list_complete: false; + keys: KVNamespaceListKey[]; + cursor: string; + cacheStatus: string | null; +} | { + list_complete: true; + keys: KVNamespaceListKey[]; + cacheStatus: string | null; +}; +interface KVNamespace { + get(key: Key, options?: Partial>): Promise; + get(key: Key, type: "text"): Promise; + get(key: Key, type: "json"): Promise; + get(key: Key, type: "arrayBuffer"): Promise; + get(key: Key, type: "stream"): Promise; + get(key: Key, options?: KVNamespaceGetOptions<"text">): Promise; + get(key: Key, options?: KVNamespaceGetOptions<"json">): Promise; + get(key: Key, options?: KVNamespaceGetOptions<"arrayBuffer">): Promise; + get(key: Key, options?: 
KVNamespaceGetOptions<"stream">): Promise; + get(key: Array, type: "text"): Promise>; + get(key: Array, type: "json"): Promise>; + get(key: Array, options?: Partial>): Promise>; + get(key: Array, options?: KVNamespaceGetOptions<"text">): Promise>; + get(key: Array, options?: KVNamespaceGetOptions<"json">): Promise>; + list(options?: KVNamespaceListOptions): Promise>; + put(key: Key, value: string | ArrayBuffer | ArrayBufferView | ReadableStream, options?: KVNamespacePutOptions): Promise; + getWithMetadata(key: Key, options?: Partial>): Promise>; + getWithMetadata(key: Key, type: "text"): Promise>; + getWithMetadata(key: Key, type: "json"): Promise>; + getWithMetadata(key: Key, type: "arrayBuffer"): Promise>; + getWithMetadata(key: Key, type: "stream"): Promise>; + getWithMetadata(key: Key, options: KVNamespaceGetOptions<"text">): Promise>; + getWithMetadata(key: Key, options: KVNamespaceGetOptions<"json">): Promise>; + getWithMetadata(key: Key, options: KVNamespaceGetOptions<"arrayBuffer">): Promise>; + getWithMetadata(key: Key, options: KVNamespaceGetOptions<"stream">): Promise>; + getWithMetadata(key: Array, type: "text"): Promise>>; + getWithMetadata(key: Array, type: "json"): Promise>>; + getWithMetadata(key: Array, options?: Partial>): Promise>>; + getWithMetadata(key: Array, options?: KVNamespaceGetOptions<"text">): Promise>>; + getWithMetadata(key: Array, options?: KVNamespaceGetOptions<"json">): Promise>>; + delete(key: Key): Promise; +} +interface KVNamespaceListOptions { + limit?: number; + prefix?: (string | null); + cursor?: (string | null); +} +interface KVNamespaceGetOptions { + type: Type; + cacheTtl?: number; +} +interface KVNamespacePutOptions { + expiration?: number; + expirationTtl?: number; + metadata?: (any | null); +} +interface KVNamespaceGetWithMetadataResult { + value: Value | null; + metadata: Metadata | null; + cacheStatus: string | null; +} +type QueueContentType = "text" | "bytes" | "json" | "v8"; +interface Queue { + metrics(): 
Promise; + send(message: Body, options?: QueueSendOptions): Promise; + sendBatch(messages: Iterable>, options?: QueueSendBatchOptions): Promise; +} +interface QueueSendMetrics { + backlogCount: number; + backlogBytes: number; + oldestMessageTimestamp?: Date; +} +interface QueueSendMetadata { + metrics: QueueSendMetrics; +} +interface QueueSendResponse { + metadata: QueueSendMetadata; +} +interface QueueSendBatchMetrics { + backlogCount: number; + backlogBytes: number; + oldestMessageTimestamp?: Date; +} +interface QueueSendBatchMetadata { + metrics: QueueSendBatchMetrics; +} +interface QueueSendBatchResponse { + metadata: QueueSendBatchMetadata; +} +interface QueueSendOptions { + contentType?: QueueContentType; + delaySeconds?: number; +} +interface QueueSendBatchOptions { + delaySeconds?: number; +} +interface MessageSendRequest { + body: Body; + contentType?: QueueContentType; + delaySeconds?: number; +} +interface QueueMetrics { + backlogCount: number; + backlogBytes: number; + oldestMessageTimestamp?: Date; +} +interface MessageBatchMetrics { + backlogCount: number; + backlogBytes: number; + oldestMessageTimestamp?: Date; +} +interface MessageBatchMetadata { + metrics: MessageBatchMetrics; +} +interface QueueRetryOptions { + delaySeconds?: number; +} +interface Message { + readonly id: string; + readonly timestamp: Date; + readonly body: Body; + readonly attempts: number; + retry(options?: QueueRetryOptions): void; + ack(): void; +} +interface QueueEvent extends ExtendableEvent { + readonly messages: readonly Message[]; + readonly queue: string; + readonly metadata: MessageBatchMetadata; + retryAll(options?: QueueRetryOptions): void; + ackAll(): void; +} +interface MessageBatch { + readonly messages: readonly Message[]; + readonly queue: string; + readonly metadata: MessageBatchMetadata; + retryAll(options?: QueueRetryOptions): void; + ackAll(): void; +} +interface R2Error extends Error { + readonly name: string; + readonly code: number; + readonly message: 
string; + readonly action: string; + readonly stack: any; +} +interface R2ListOptions { + limit?: number; + prefix?: string; + cursor?: string; + delimiter?: string; + startAfter?: string; + include?: ("httpMetadata" | "customMetadata")[]; +} +interface R2Bucket { + head(key: string): Promise; + get(key: string, options: R2GetOptions & { + onlyIf: R2Conditional | Headers; + }): Promise; + get(key: string, options?: R2GetOptions): Promise; + put(key: string, value: ReadableStream | ArrayBuffer | ArrayBufferView | string | null | Blob, options?: R2PutOptions & { + onlyIf: R2Conditional | Headers; + }): Promise; + put(key: string, value: ReadableStream | ArrayBuffer | ArrayBufferView | string | null | Blob, options?: R2PutOptions): Promise; + createMultipartUpload(key: string, options?: R2MultipartOptions): Promise; + resumeMultipartUpload(key: string, uploadId: string): R2MultipartUpload; + delete(keys: string | string[]): Promise; + list(options?: R2ListOptions): Promise; +} +interface R2MultipartUpload { + readonly key: string; + readonly uploadId: string; + uploadPart(partNumber: number, value: ReadableStream | (ArrayBuffer | ArrayBufferView) | string | Blob, options?: R2UploadPartOptions): Promise; + abort(): Promise; + complete(uploadedParts: R2UploadedPart[]): Promise; +} +interface R2UploadedPart { + partNumber: number; + etag: string; +} +declare abstract class R2Object { + readonly key: string; + readonly version: string; + readonly size: number; + readonly etag: string; + readonly httpEtag: string; + readonly checksums: R2Checksums; + readonly uploaded: Date; + readonly httpMetadata?: R2HTTPMetadata; + readonly customMetadata?: Record; + readonly range?: R2Range; + readonly storageClass: string; + readonly ssecKeyMd5?: string; + writeHttpMetadata(headers: Headers): void; +} +interface R2ObjectBody extends R2Object { + get body(): ReadableStream; + get bodyUsed(): boolean; + arrayBuffer(): Promise; + bytes(): Promise; + text(): Promise; + json(): Promise; + 
blob(): Promise; +} +type R2Range = { + offset: number; + length?: number; +} | { + offset?: number; + length: number; +} | { + suffix: number; +}; +interface R2Conditional { + etagMatches?: string; + etagDoesNotMatch?: string; + uploadedBefore?: Date; + uploadedAfter?: Date; + secondsGranularity?: boolean; +} +interface R2GetOptions { + onlyIf?: (R2Conditional | Headers); + range?: (R2Range | Headers); + ssecKey?: (ArrayBuffer | string); +} +interface R2PutOptions { + onlyIf?: (R2Conditional | Headers); + httpMetadata?: (R2HTTPMetadata | Headers); + customMetadata?: Record; + md5?: ((ArrayBuffer | ArrayBufferView) | string); + sha1?: ((ArrayBuffer | ArrayBufferView) | string); + sha256?: ((ArrayBuffer | ArrayBufferView) | string); + sha384?: ((ArrayBuffer | ArrayBufferView) | string); + sha512?: ((ArrayBuffer | ArrayBufferView) | string); + storageClass?: string; + ssecKey?: (ArrayBuffer | string); +} +interface R2MultipartOptions { + httpMetadata?: (R2HTTPMetadata | Headers); + customMetadata?: Record; + storageClass?: string; + ssecKey?: (ArrayBuffer | string); +} +interface R2Checksums { + readonly md5?: ArrayBuffer; + readonly sha1?: ArrayBuffer; + readonly sha256?: ArrayBuffer; + readonly sha384?: ArrayBuffer; + readonly sha512?: ArrayBuffer; + toJSON(): R2StringChecksums; +} +interface R2StringChecksums { + md5?: string; + sha1?: string; + sha256?: string; + sha384?: string; + sha512?: string; +} +interface R2HTTPMetadata { + contentType?: string; + contentLanguage?: string; + contentDisposition?: string; + contentEncoding?: string; + cacheControl?: string; + cacheExpiry?: Date; +} +type R2Objects = { + objects: R2Object[]; + delimitedPrefixes: string[]; +} & ({ + truncated: true; + cursor: string; +} | { + truncated: false; +}); +interface R2UploadPartOptions { + ssecKey?: (ArrayBuffer | string); +} +declare abstract class ScheduledEvent extends ExtendableEvent { + readonly scheduledTime: number; + readonly cron: string; + noRetry(): void; +} +interface 
ScheduledController { + readonly scheduledTime: number; + readonly cron: string; + noRetry(): void; +} +interface QueuingStrategy { + highWaterMark?: (number | bigint); + size?: (chunk: T) => number | bigint; +} +interface UnderlyingSink { + type?: string; + start?: (controller: WritableStreamDefaultController) => void | Promise; + write?: (chunk: W, controller: WritableStreamDefaultController) => void | Promise; + abort?: (reason: any) => void | Promise; + close?: () => void | Promise; +} +interface UnderlyingByteSource { + type: "bytes"; + autoAllocateChunkSize?: number; + start?: (controller: ReadableByteStreamController) => void | Promise; + pull?: (controller: ReadableByteStreamController) => void | Promise; + cancel?: (reason: any) => void | Promise; +} +interface UnderlyingSource { + type?: "" | undefined; + start?: (controller: ReadableStreamDefaultController) => void | Promise; + pull?: (controller: ReadableStreamDefaultController) => void | Promise; + cancel?: (reason: any) => void | Promise; + expectedLength?: (number | bigint); +} +interface Transformer { + readableType?: string; + writableType?: string; + start?: (controller: TransformStreamDefaultController) => void | Promise; + transform?: (chunk: I, controller: TransformStreamDefaultController) => void | Promise; + flush?: (controller: TransformStreamDefaultController) => void | Promise; + cancel?: (reason: any) => void | Promise; + expectedLength?: number; +} +interface StreamPipeOptions { + preventAbort?: boolean; + preventCancel?: boolean; + /** + * Pipes this readable stream to a given writable stream destination. The way in which the piping process behaves under various error conditions can be customized with a number of passed options. It returns a promise that fulfills when the piping process completes successfully, or rejects if any errors were encountered. + * + * Piping a stream will lock it for the duration of the pipe, preventing any other consumer from acquiring a reader. 
+ * + * Errors and closures of the source and destination streams propagate as follows: + * + * An error in this source readable stream will abort destination, unless preventAbort is truthy. The returned promise will be rejected with the source's error, or with any error that occurs during aborting the destination. + * + * An error in destination will cancel this source readable stream, unless preventCancel is truthy. The returned promise will be rejected with the destination's error, or with any error that occurs during canceling the source. + * + * When this source readable stream closes, destination will be closed, unless preventClose is truthy. The returned promise will be fulfilled once this process completes, unless an error is encountered while closing the destination, in which case it will be rejected with that error. + * + * If destination starts out closed or closing, this source readable stream will be canceled, unless preventCancel is true. The returned promise will be rejected with an error indicating piping to a closed stream failed, or with any error that occurs during canceling the source. + * + * The signal option can be set to an AbortSignal to allow aborting an ongoing pipe operation via the corresponding AbortController. In this case, this source readable stream will be canceled, and destination aborted, unless the respective options preventCancel or preventAbort are set. + */ + preventClose?: boolean; + signal?: AbortSignal; +} +type ReadableStreamReadResult = { + done: false; + value: R; +} | { + done: true; + value?: undefined; +}; +/** + * The `ReadableStream` interface of the Streams API represents a readable stream of byte data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream) + */ +interface ReadableStream { + /** + * The **`locked`** read-only property of the ReadableStream interface returns whether or not the readable stream is locked to a reader. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/locked) + */ + get locked(): boolean; + /** + * The **`cancel()`** method of the ReadableStream interface returns a Promise that resolves when the stream is canceled. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/cancel) + */ + cancel(reason?: any): Promise; + /** + * The **`getReader()`** method of the ReadableStream interface creates a reader and locks the stream to it. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/getReader) + */ + getReader(): ReadableStreamDefaultReader; + /** + * The **`getReader()`** method of the ReadableStream interface creates a reader and locks the stream to it. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/getReader) + */ + getReader(options: ReadableStreamGetReaderOptions): ReadableStreamBYOBReader; + /** + * The **`pipeThrough()`** method of the ReadableStream interface provides a chainable way of piping the current stream through a transform stream or any other writable/readable pair. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/pipeThrough) + */ + pipeThrough(transform: ReadableWritablePair, options?: StreamPipeOptions): ReadableStream; + /** + * The **`pipeTo()`** method of the ReadableStream interface pipes the current `ReadableStream` to a given WritableStream and returns a Promise that fulfills when the piping process completes successfully, or rejects if any errors were encountered. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/pipeTo) + */ + pipeTo(destination: WritableStream, options?: StreamPipeOptions): Promise; + /** + * The **`tee()`** method of the two-element array containing the two resulting branches as new ReadableStream instances. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/tee) + */ + tee(): [ + ReadableStream, + ReadableStream + ]; + values(options?: ReadableStreamValuesOptions): AsyncIterableIterator; + [Symbol.asyncIterator](options?: ReadableStreamValuesOptions): AsyncIterableIterator; +} +/** + * The `ReadableStream` interface of the Streams API represents a readable stream of byte data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream) + */ +declare const ReadableStream: { + prototype: ReadableStream; + new (underlyingSource: UnderlyingByteSource, strategy?: QueuingStrategy): ReadableStream; + new (underlyingSource?: UnderlyingSource, strategy?: QueuingStrategy): ReadableStream; +}; +/** + * The **`ReadableStreamDefaultReader`** interface of the Streams API represents a default reader that can be used to read stream data supplied from a network (such as a fetch request). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader) + */ +declare class ReadableStreamDefaultReader { + constructor(stream: ReadableStream); + get closed(): Promise; + cancel(reason?: any): Promise; + /** + * The **`read()`** method of the ReadableStreamDefaultReader interface returns a Promise providing access to the next chunk in the stream's internal queue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader/read) + */ + read(): Promise>; + /** + * The **`releaseLock()`** method of the ReadableStreamDefaultReader interface releases the reader's lock on the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader/releaseLock) + */ + releaseLock(): void; +} +/** + * The `ReadableStreamBYOBReader` interface of the Streams API defines a reader for a ReadableStream that supports zero-copy reading from an underlying byte source. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader) + */ +declare class ReadableStreamBYOBReader { + constructor(stream: ReadableStream); + get closed(): Promise; + cancel(reason?: any): Promise; + /** + * The **`read()`** method of the ReadableStreamBYOBReader interface is used to read data into a view on a user-supplied buffer from an associated readable byte stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader/read) + */ + read(view: T): Promise>; + /** + * The **`releaseLock()`** method of the ReadableStreamBYOBReader interface releases the reader's lock on the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader/releaseLock) + */ + releaseLock(): void; + readAtLeast(minElements: number, view: T): Promise>; +} +interface ReadableStreamBYOBReaderReadableStreamBYOBReaderReadOptions { + min?: number; +} +interface ReadableStreamGetReaderOptions { + /** + * Creates a ReadableStreamBYOBReader and locks the stream to the new reader. + * + * This call behaves the same way as the no-argument variant, except that it only works on readable byte streams, i.e. streams which were constructed specifically with the ability to handle "bring your own buffer" reading. The returned BYOB reader provides the ability to directly read individual chunks from the stream via its read() method, into developer-supplied buffers, allowing more precise control over allocation. + */ + mode: "byob"; +} +/** + * The **`ReadableStreamBYOBRequest`** interface of the Streams API represents a 'pull request' for data from an underlying source that will made as a zero-copy transfer to a consumer (bypassing the stream's internal queues). 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest) + */ +declare abstract class ReadableStreamBYOBRequest { + /** + * The **`view`** getter property of the ReadableStreamBYOBRequest interface returns the current view. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/view) + */ + get view(): Uint8Array | null; + /** + * The **`respond()`** method of the ReadableStreamBYOBRequest interface is used to signal to the associated readable byte stream that the specified number of bytes were written into the ReadableStreamBYOBRequest.view. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/respond) + */ + respond(bytesWritten: number): void; + /** + * The **`respondWithNewView()`** method of the ReadableStreamBYOBRequest interface specifies a new view that the consumer of the associated readable byte stream should write to instead of ReadableStreamBYOBRequest.view. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/respondWithNewView) + */ + respondWithNewView(view: ArrayBuffer | ArrayBufferView): void; + get atLeast(): number | null; +} +/** + * The **`ReadableStreamDefaultController`** interface of the Streams API represents a controller allowing control of a ReadableStream's state and internal queue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController) + */ +declare abstract class ReadableStreamDefaultController { + /** + * The **`desiredSize`** read-only property of the required to fill the stream's internal queue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/desiredSize) + */ + get desiredSize(): number | null; + /** + * The **`close()`** method of the ReadableStreamDefaultController interface closes the associated stream. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/close) + */ + close(): void; + /** + * The **`enqueue()`** method of the ```js-nolint enqueue(chunk) ``` - `chunk` - : The chunk to enqueue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/enqueue) + */ + enqueue(chunk?: R): void; + /** + * The **`error()`** method of the with the associated stream to error. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/error) + */ + error(reason: any): void; +} +/** + * The **`ReadableByteStreamController`** interface of the Streams API represents a controller for a readable byte stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController) + */ +declare abstract class ReadableByteStreamController { + /** + * The **`byobRequest`** read-only property of the ReadableByteStreamController interface returns the current BYOB request, or `null` if there are no pending requests. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/byobRequest) + */ + get byobRequest(): ReadableStreamBYOBRequest | null; + /** + * The **`desiredSize`** read-only property of the ReadableByteStreamController interface returns the number of bytes required to fill the stream's internal queue to its 'desired size'. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/desiredSize) + */ + get desiredSize(): number | null; + /** + * The **`close()`** method of the ReadableByteStreamController interface closes the associated stream. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/close) + */ + close(): void; + /** + * The **`enqueue()`** method of the ReadableByteStreamController interface enqueues a given chunk on the associated readable byte stream (the chunk is copied into the stream's internal queues). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/enqueue) + */ + enqueue(chunk: ArrayBuffer | ArrayBufferView): void; + /** + * The **`error()`** method of the ReadableByteStreamController interface causes any future interactions with the associated stream to error with the specified reason. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/error) + */ + error(reason: any): void; +} +/** + * The **`WritableStreamDefaultController`** interface of the Streams API represents a controller allowing control of a WritableStream's state. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController) + */ +declare abstract class WritableStreamDefaultController { + /** + * The read-only **`signal`** property of the WritableStreamDefaultController interface returns the AbortSignal associated with the controller. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController/signal) + */ + get signal(): AbortSignal; + /** + * The **`error()`** method of the with the associated stream to error. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController/error) + */ + error(reason?: any): void; +} +/** + * The **`TransformStreamDefaultController`** interface of the Streams API provides methods to manipulate the associated ReadableStream and WritableStream. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController) + */ +declare abstract class TransformStreamDefaultController { + /** + * The **`desiredSize`** read-only property of the TransformStreamDefaultController interface returns the desired size to fill the queue of the associated ReadableStream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/desiredSize) + */ + get desiredSize(): number | null; + /** + * The **`enqueue()`** method of the TransformStreamDefaultController interface enqueues the given chunk in the readable side of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/enqueue) + */ + enqueue(chunk?: O): void; + /** + * The **`error()`** method of the TransformStreamDefaultController interface errors both sides of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/error) + */ + error(reason: any): void; + /** + * The **`terminate()`** method of the TransformStreamDefaultController interface closes the readable side and errors the writable side of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/terminate) + */ + terminate(): void; +} +interface ReadableWritablePair { + readable: ReadableStream; + /** + * Provides a convenient, chainable way of piping this readable stream through a transform stream (or any other { writable, readable } pair). It simply pipes the stream into the writable side of the supplied pair, and returns the readable side for further use. + * + * Piping a stream will lock it for the duration of the pipe, preventing any other consumer from acquiring a reader. + */ + writable: WritableStream; +} +/** + * The **`WritableStream`** interface of the Streams API provides a standard abstraction for writing streaming data to a destination, known as a sink. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream) + */ +declare class WritableStream { + constructor(underlyingSink?: UnderlyingSink, queuingStrategy?: QueuingStrategy); + /** + * The **`locked`** read-only property of the WritableStream interface returns a boolean indicating whether the `WritableStream` is locked to a writer. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/locked) + */ + get locked(): boolean; + /** + * The **`abort()`** method of the WritableStream interface aborts the stream, signaling that the producer can no longer successfully write to the stream and it is to be immediately moved to an error state, with any queued writes discarded. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/abort) + */ + abort(reason?: any): Promise; + /** + * The **`close()`** method of the WritableStream interface closes the associated stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/close) + */ + close(): Promise; + /** + * The **`getWriter()`** method of the WritableStream interface returns a new instance of WritableStreamDefaultWriter and locks the stream to that instance. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/getWriter) + */ + getWriter(): WritableStreamDefaultWriter; +} +/** + * The **`WritableStreamDefaultWriter`** interface of the Streams API is the object returned by WritableStream.getWriter() and once created locks the writer to the `WritableStream` ensuring that no other streams can write to the underlying sink. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter) + */ +declare class WritableStreamDefaultWriter { + constructor(stream: WritableStream); + /** + * The **`closed`** read-only property of the the stream errors or the writer's lock is released. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/closed) + */ + get closed(): Promise; + /** + * The **`ready`** read-only property of the that resolves when the desired size of the stream's internal queue transitions from non-positive to positive, signaling that it is no longer applying backpressure. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/ready) + */ + get ready(): Promise; + /** + * The **`desiredSize`** read-only property of the to fill the stream's internal queue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/desiredSize) + */ + get desiredSize(): number | null; + /** + * The **`abort()`** method of the the producer can no longer successfully write to the stream and it is to be immediately moved to an error state, with any queued writes discarded. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/abort) + */ + abort(reason?: any): Promise; + /** + * The **`close()`** method of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/close) + */ + close(): Promise; + /** + * The **`write()`** method of the operation. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/write) + */ + write(chunk?: W): Promise; + /** + * The **`releaseLock()`** method of the corresponding stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/releaseLock) + */ + releaseLock(): void; +} +/** + * The **`TransformStream`** interface of the Streams API represents a concrete implementation of the pipe chain _transform stream_ concept. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream) + */ +declare class TransformStream { + constructor(transformer?: Transformer, writableStrategy?: QueuingStrategy, readableStrategy?: QueuingStrategy); + /** + * The **`readable`** read-only property of the TransformStream interface returns the ReadableStream instance controlled by this `TransformStream`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream/readable) + */ + get readable(): ReadableStream; + /** + * The **`writable`** read-only property of the TransformStream interface returns the WritableStream instance controlled by this `TransformStream`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream/writable) + */ + get writable(): WritableStream; +} +declare class FixedLengthStream extends IdentityTransformStream { + constructor(expectedLength: number | bigint, queuingStrategy?: IdentityTransformStreamQueuingStrategy); +} +declare class IdentityTransformStream extends TransformStream { + constructor(queuingStrategy?: IdentityTransformStreamQueuingStrategy); +} +interface IdentityTransformStreamQueuingStrategy { + highWaterMark?: (number | bigint); +} +interface ReadableStreamValuesOptions { + preventCancel?: boolean; +} +/** + * The **`CompressionStream`** interface of the Compression Streams API is an API for compressing a stream of data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CompressionStream) + */ +declare class CompressionStream extends TransformStream { + constructor(format: "gzip" | "deflate" | "deflate-raw"); +} +/** + * The **`DecompressionStream`** interface of the Compression Streams API is an API for decompressing a stream of data. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DecompressionStream) + */ +declare class DecompressionStream extends TransformStream { + constructor(format: "gzip" | "deflate" | "deflate-raw"); +} +/** + * The **`TextEncoderStream`** interface of the Encoding API converts a stream of strings into bytes in the UTF-8 encoding. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoderStream) + */ +declare class TextEncoderStream extends TransformStream { + constructor(); + get encoding(): string; +} +/** + * The **`TextDecoderStream`** interface of the Encoding API converts a stream of text in a binary encoding, such as UTF-8 etc., to a stream of strings. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoderStream) + */ +declare class TextDecoderStream extends TransformStream { + constructor(label?: string, options?: TextDecoderStreamTextDecoderStreamInit); + get encoding(): string; + get fatal(): boolean; + get ignoreBOM(): boolean; +} +interface TextDecoderStreamTextDecoderStreamInit { + fatal?: boolean; + ignoreBOM?: boolean; +} +/** + * The **`ByteLengthQueuingStrategy`** interface of the Streams API provides a built-in byte length queuing strategy that can be used when constructing streams. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy) + */ +declare class ByteLengthQueuingStrategy implements QueuingStrategy { + constructor(init: QueuingStrategyInit); + /** + * The read-only **`ByteLengthQueuingStrategy.highWaterMark`** property returns the total number of bytes that can be contained in the internal queue before backpressure is applied. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy/highWaterMark) + */ + get highWaterMark(): number; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy/size) */ + get size(): (chunk?: any) => number; +} +/** + * The **`CountQueuingStrategy`** interface of the Streams API provides a built-in chunk counting queuing strategy that can be used when constructing streams. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy) + */ +declare class CountQueuingStrategy implements QueuingStrategy { + constructor(init: QueuingStrategyInit); + /** + * The read-only **`CountQueuingStrategy.highWaterMark`** property returns the total number of chunks that can be contained in the internal queue before backpressure is applied. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy/highWaterMark) + */ + get highWaterMark(): number; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy/size) */ + get size(): (chunk?: any) => number; +} +interface QueuingStrategyInit { + /** + * Creates a new ByteLengthQueuingStrategy with the provided high water mark. + * + * Note that the provided high water mark will not be validated ahead of time. Instead, if it is negative, NaN, or not a number, the resulting ByteLengthQueuingStrategy will cause the corresponding stream constructor to throw. 
+ */ + highWaterMark: number; +} +interface TracePreviewInfo { + id: string; + slug: string; + name: string; +} +interface ScriptVersion { + id?: string; + tag?: string; + message?: string; +} +declare abstract class TailEvent extends ExtendableEvent { + readonly events: TraceItem[]; + readonly traces: TraceItem[]; +} +interface TraceItem { + readonly event: (TraceItemFetchEventInfo | TraceItemJsRpcEventInfo | TraceItemConnectEventInfo | TraceItemScheduledEventInfo | TraceItemAlarmEventInfo | TraceItemQueueEventInfo | TraceItemEmailEventInfo | TraceItemTailEventInfo | TraceItemCustomEventInfo | TraceItemHibernatableWebSocketEventInfo) | null; + readonly eventTimestamp: number | null; + readonly logs: TraceLog[]; + readonly exceptions: TraceException[]; + readonly diagnosticsChannelEvents: TraceDiagnosticChannelEvent[]; + readonly scriptName: string | null; + readonly entrypoint?: string; + readonly scriptVersion?: ScriptVersion; + readonly dispatchNamespace?: string; + readonly scriptTags?: string[]; + readonly tailAttributes?: Record; + readonly preview?: TracePreviewInfo; + readonly durableObjectId?: string; + readonly outcome: string; + readonly executionModel: string; + readonly truncated: boolean; + readonly cpuTime: number; + readonly wallTime: number; +} +interface TraceItemAlarmEventInfo { + readonly scheduledTime: Date; +} +interface TraceItemConnectEventInfo { +} +interface TraceItemCustomEventInfo { +} +interface TraceItemScheduledEventInfo { + readonly scheduledTime: number; + readonly cron: string; +} +interface TraceItemQueueEventInfo { + readonly queue: string; + readonly batchSize: number; +} +interface TraceItemEmailEventInfo { + readonly mailFrom: string; + readonly rcptTo: string; + readonly rawSize: number; +} +interface TraceItemTailEventInfo { + readonly consumedEvents: TraceItemTailEventInfoTailItem[]; +} +interface TraceItemTailEventInfoTailItem { + readonly scriptName: string | null; +} +interface TraceItemFetchEventInfo { + readonly 
response?: TraceItemFetchEventInfoResponse; + readonly request: TraceItemFetchEventInfoRequest; +} +interface TraceItemFetchEventInfoRequest { + readonly cf?: any; + readonly headers: Record; + readonly method: string; + readonly url: string; + getUnredacted(): TraceItemFetchEventInfoRequest; +} +interface TraceItemFetchEventInfoResponse { + readonly status: number; +} +interface TraceItemJsRpcEventInfo { + readonly rpcMethod: string; +} +interface TraceItemHibernatableWebSocketEventInfo { + readonly getWebSocketEvent: TraceItemHibernatableWebSocketEventInfoMessage | TraceItemHibernatableWebSocketEventInfoClose | TraceItemHibernatableWebSocketEventInfoError; +} +interface TraceItemHibernatableWebSocketEventInfoMessage { + readonly webSocketEventType: string; +} +interface TraceItemHibernatableWebSocketEventInfoClose { + readonly webSocketEventType: string; + readonly code: number; + readonly wasClean: boolean; +} +interface TraceItemHibernatableWebSocketEventInfoError { + readonly webSocketEventType: string; +} +interface TraceLog { + readonly timestamp: number; + readonly level: string; + readonly message: any; +} +interface TraceException { + readonly timestamp: number; + readonly message: string; + readonly name: string; + readonly stack?: string; +} +interface TraceDiagnosticChannelEvent { + readonly timestamp: number; + readonly channel: string; + readonly message: any; +} +interface TraceMetrics { + readonly cpuTime: number; + readonly wallTime: number; +} +interface UnsafeTraceMetrics { + fromTrace(item: TraceItem): TraceMetrics; +} +/** + * The **`URL`** interface is used to parse, construct, normalize, and encode URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL) + */ +declare class URL { + constructor(url: string | URL, base?: string | URL); + /** + * The **`origin`** read-only property of the URL interface returns a string containing the Unicode serialization of the origin of the represented URL. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/origin) + */ + get origin(): string; + /** + * The **`href`** property of the URL interface is a string containing the whole URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/href) + */ + get href(): string; + /** + * The **`href`** property of the URL interface is a string containing the whole URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/href) + */ + set href(value: string); + /** + * The **`protocol`** property of the URL interface is a string containing the protocol or scheme of the URL, including the final `':'`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/protocol) + */ + get protocol(): string; + /** + * The **`protocol`** property of the URL interface is a string containing the protocol or scheme of the URL, including the final `':'`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/protocol) + */ + set protocol(value: string); + /** + * The **`username`** property of the URL interface is a string containing the username component of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/username) + */ + get username(): string; + /** + * The **`username`** property of the URL interface is a string containing the username component of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/username) + */ + set username(value: string); + /** + * The **`password`** property of the URL interface is a string containing the password component of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/password) + */ + get password(): string; + /** + * The **`password`** property of the URL interface is a string containing the password component of the URL. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/password) + */ + set password(value: string); + /** + * The **`host`** property of the URL interface is a string containing the host, which is the URL.hostname, and then, if the port of the URL is nonempty, a `':'`, followed by the URL.port of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/host) + */ + get host(): string; + /** + * The **`host`** property of the URL interface is a string containing the host, which is the URL.hostname, and then, if the port of the URL is nonempty, a `':'`, followed by the URL.port of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/host) + */ + set host(value: string); + /** + * The **`hostname`** property of the URL interface is a string containing either the domain name or IP address of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hostname) + */ + get hostname(): string; + /** + * The **`hostname`** property of the URL interface is a string containing either the domain name or IP address of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hostname) + */ + set hostname(value: string); + /** + * The **`port`** property of the URL interface is a string containing the port number of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/port) + */ + get port(): string; + /** + * The **`port`** property of the URL interface is a string containing the port number of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/port) + */ + set port(value: string); + /** + * The **`pathname`** property of the URL interface represents a location in a hierarchical structure. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/pathname) + */ + get pathname(): string; + /** + * The **`pathname`** property of the URL interface represents a location in a hierarchical structure. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/pathname) + */ + set pathname(value: string); + /** + * The **`search`** property of the URL interface is a search string, also called a _query string_, that is a string containing a `'?'` followed by the parameters of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/search) + */ + get search(): string; + /** + * The **`search`** property of the URL interface is a search string, also called a _query string_, that is a string containing a `'?'` followed by the parameters of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/search) + */ + set search(value: string); + /** + * The **`hash`** property of the URL interface is a string containing a `'#'` followed by the fragment identifier of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hash) + */ + get hash(): string; + /** + * The **`hash`** property of the URL interface is a string containing a `'#'` followed by the fragment identifier of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hash) + */ + set hash(value: string); + /** + * The **`searchParams`** read-only property of the access to the [MISSING: httpmethod('GET')] decoded query arguments contained in the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/searchParams) + */ + get searchParams(): URLSearchParams; + /** + * The **`toJSON()`** method of the URL interface returns a string containing a serialized version of the URL, although in practice it seems to have the same effect as ```js-nolint toJSON() ``` None. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/toJSON) + */ + toJSON(): string; + /*function toString() { [native code] }*/ + toString(): string; + /** + * The **`URL.canParse()`** static method of the URL interface returns a boolean indicating whether or not an absolute URL, or a relative URL combined with a base URL, are parsable and valid. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/canParse_static) + */ + static canParse(url: string, base?: string): boolean; + /** + * The **`URL.parse()`** static method of the URL interface returns a newly created URL object representing the URL defined by the parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/parse_static) + */ + static parse(url: string, base?: string): URL | null; + /** + * The **`createObjectURL()`** static method of the URL interface creates a string containing a URL representing the object given in the parameter. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/createObjectURL_static) + */ + static createObjectURL(object: File | Blob): string; + /** + * The **`revokeObjectURL()`** static method of the URL interface releases an existing object URL which was previously created by calling Call this method when you've finished using an object URL to let the browser know not to keep the reference to the file any longer. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/revokeObjectURL_static) + */ + static revokeObjectURL(object_url: string): void; +} +/** + * The **`URLSearchParams`** interface defines utility methods to work with the query string of a URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams) + */ +declare class URLSearchParams { + constructor(init?: (Iterable> | Record | string)); + /** + * The **`size`** read-only property of the URLSearchParams interface indicates the total number of search parameter entries. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/size) + */ + get size(): number; + /** + * The **`append()`** method of the URLSearchParams interface appends a specified key/value pair as a new search parameter. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/append) + */ + append(name: string, value: string): void; + /** + * The **`delete()`** method of the URLSearchParams interface deletes specified parameters and their associated value(s) from the list of all search parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/delete) + */ + delete(name: string, value?: string): void; + /** + * The **`get()`** method of the URLSearchParams interface returns the first value associated to the given search parameter. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/get) + */ + get(name: string): string | null; + /** + * The **`getAll()`** method of the URLSearchParams interface returns all the values associated with a given search parameter as an array. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/getAll) + */ + getAll(name: string): string[]; + /** + * The **`has()`** method of the URLSearchParams interface returns a boolean value that indicates whether the specified parameter is in the search parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/has) + */ + has(name: string, value?: string): boolean; + /** + * The **`set()`** method of the URLSearchParams interface sets the value associated with a given search parameter to the given value. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/set) + */ + set(name: string, value: string): void; + /** + * The **`URLSearchParams.sort()`** method sorts all key/value pairs contained in this object in place and returns `undefined`. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/sort) + */ + sort(): void; + /* Returns an array of key, value pairs for every entry in the search params. */ + entries(): IterableIterator<[ + key: string, + value: string + ]>; + /* Returns a list of keys in the search params. */ + keys(): IterableIterator; + /* Returns a list of values in the search params. */ + values(): IterableIterator; + forEach(callback: (this: This, value: string, key: string, parent: URLSearchParams) => void, thisArg?: This): void; + /*function toString() { [native code] }*/ + toString(): string; + [Symbol.iterator](): IterableIterator<[ + key: string, + value: string + ]>; +} +declare class URLPattern { + constructor(input?: (string | URLPatternInit), baseURL?: (string | URLPatternOptions), patternOptions?: URLPatternOptions); + get protocol(): string; + get username(): string; + get password(): string; + get hostname(): string; + get port(): string; + get pathname(): string; + get search(): string; + get hash(): string; + get hasRegExpGroups(): boolean; + test(input?: (string | URLPatternInit), baseURL?: string): boolean; + exec(input?: (string | URLPatternInit), baseURL?: string): URLPatternResult | null; +} +interface URLPatternInit { + protocol?: string; + username?: string; + password?: string; + hostname?: string; + port?: string; + pathname?: string; + search?: string; + hash?: string; + baseURL?: string; +} +interface URLPatternComponentResult { + input: string; + groups: Record; +} +interface URLPatternResult { + inputs: (string | URLPatternInit)[]; + protocol: URLPatternComponentResult; + username: URLPatternComponentResult; + password: URLPatternComponentResult; + hostname: URLPatternComponentResult; + port: URLPatternComponentResult; + pathname: URLPatternComponentResult; + search: URLPatternComponentResult; + hash: URLPatternComponentResult; +} +interface URLPatternOptions { + ignoreCase?: boolean; +} +/** + * A `CloseEvent` is sent to clients 
using WebSockets when the connection is closed. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent) + */ +declare class CloseEvent extends Event { + constructor(type: string, initializer?: CloseEventInit); + /** + * The **`code`** read-only property of the CloseEvent interface returns a WebSocket connection close code indicating the reason the connection was closed. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/code) + */ + readonly code: number; + /** + * The **`reason`** read-only property of the CloseEvent interface returns the WebSocket connection close reason the server gave for closing the connection; that is, a concise human-readable prose explanation for the closure. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/reason) + */ + readonly reason: string; + /** + * The **`wasClean`** read-only property of the CloseEvent interface returns `true` if the connection closed cleanly. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/wasClean) + */ + readonly wasClean: boolean; +} +interface CloseEventInit { + code?: number; + reason?: string; + wasClean?: boolean; +} +type WebSocketEventMap = { + close: CloseEvent; + message: MessageEvent; + open: Event; + error: ErrorEvent; +}; +/** + * The `WebSocket` object provides the API for creating and managing a WebSocket connection to a server, as well as for sending and receiving data on the connection. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket) + */ +declare var WebSocket: { + prototype: WebSocket; + new (url: string, protocols?: (string[] | string)): WebSocket; + readonly READY_STATE_CONNECTING: number; + readonly CONNECTING: number; + readonly READY_STATE_OPEN: number; + readonly OPEN: number; + readonly READY_STATE_CLOSING: number; + readonly CLOSING: number; + readonly READY_STATE_CLOSED: number; + readonly CLOSED: number; +}; +/** + * The `WebSocket` object provides the API for creating and managing a WebSocket connection to a server, as well as for sending and receiving data on the connection. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket) + */ +interface WebSocket extends EventTarget { + accept(options?: WebSocketAcceptOptions): void; + /** + * The **`WebSocket.send()`** method enqueues the specified data to be transmitted to the server over the WebSocket connection, increasing the value of `bufferedAmount` by the number of bytes needed to contain the data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/send) + */ + send(message: (ArrayBuffer | ArrayBufferView) | string): void; + /** + * The **`WebSocket.close()`** method closes the already `CLOSED`, this method does nothing. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/close) + */ + close(code?: number, reason?: string): void; + serializeAttachment(attachment: any): void; + deserializeAttachment(): any | null; + /** + * The **`WebSocket.readyState`** read-only property returns the current state of the WebSocket connection. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/readyState) + */ + readyState: number; + /** + * The **`WebSocket.url`** read-only property returns the absolute URL of the WebSocket as resolved by the constructor. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/url) + */ + url: string | null; + /** + * The **`WebSocket.protocol`** read-only property returns the name of the sub-protocol the server selected; this will be one of the strings specified in the `protocols` parameter when creating the WebSocket object, or the empty string if no connection is established. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/protocol) + */ + protocol: string | null; + /** + * The **`WebSocket.extensions`** read-only property returns the extensions selected by the server. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/extensions) + */ + extensions: string | null; + /** + * The **`WebSocket.binaryType`** property controls the type of binary data being received over the WebSocket connection. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/binaryType) + */ + binaryType: "blob" | "arraybuffer"; +} +interface WebSocketAcceptOptions { + /** + * When set to `true`, receiving a server-initiated WebSocket Close frame will not + * automatically send a reciprocal Close frame, leaving the connection in a half-open + * state. This is useful for proxying scenarios where you need to coordinate closing + * both sides independently. Defaults to `false` when the + * `no_web_socket_half_open_by_default` compatibility flag is enabled. 
+ */ + allowHalfOpen?: boolean; +} +declare const WebSocketPair: { + new (): { + 0: WebSocket; + 1: WebSocket; + }; +}; +interface SqlStorage { + exec>(query: string, ...bindings: any[]): SqlStorageCursor; + get databaseSize(): number; + Cursor: typeof SqlStorageCursor; + Statement: typeof SqlStorageStatement; +} +declare abstract class SqlStorageStatement { +} +type SqlStorageValue = ArrayBuffer | string | number | null; +declare abstract class SqlStorageCursor> { + next(): { + done?: false; + value: T; + } | { + done: true; + value?: never; + }; + toArray(): T[]; + one(): T; + raw(): IterableIterator; + columnNames: string[]; + get rowsRead(): number; + get rowsWritten(): number; + [Symbol.iterator](): IterableIterator; +} +interface Socket { + get readable(): ReadableStream; + get writable(): WritableStream; + get closed(): Promise; + get opened(): Promise; + get upgraded(): boolean; + get secureTransport(): "on" | "off" | "starttls"; + close(): Promise; + startTls(options?: TlsOptions): Socket; +} +interface SocketOptions { + secureTransport?: string; + allowHalfOpen: boolean; + highWaterMark?: (number | bigint); +} +interface SocketAddress { + hostname: string; + port: number; +} +interface TlsOptions { + expectedServerHostname?: string; +} +interface SocketInfo { + remoteAddress?: string; + localAddress?: string; +} +/** + * The **`EventSource`** interface is web content's interface to server-sent events. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource) + */ +declare class EventSource extends EventTarget { + constructor(url: string, init?: EventSourceEventSourceInit); + /** + * The **`close()`** method of the EventSource interface closes the connection, if one is made, and sets the ```js-nolint close() ``` None. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/close) + */ + close(): void; + /** + * The **`url`** read-only property of the URL of the source. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/url) + */ + get url(): string; + /** + * The **`withCredentials`** read-only property of the the `EventSource` object was instantiated with CORS credentials set. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/withCredentials) + */ + get withCredentials(): boolean; + /** + * The **`readyState`** read-only property of the connection. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/readyState) + */ + get readyState(): number; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/open_event) */ + get onopen(): any | null; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/open_event) */ + set onopen(value: any | null); + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/message_event) */ + get onmessage(): any | null; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/message_event) */ + set onmessage(value: any | null); + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/error_event) */ + get onerror(): any | null; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/error_event) */ + set onerror(value: any | null); + static readonly CONNECTING: number; + static readonly OPEN: number; + static readonly CLOSED: number; + static from(stream: ReadableStream): EventSource; +} +interface EventSourceEventSourceInit { + withCredentials?: boolean; + fetcher?: Fetcher; +} +interface Container { + get running(): boolean; + start(options?: ContainerStartupOptions): void; + monitor(): Promise; + destroy(error?: any): Promise; + signal(signo: number): void; + getTcpPort(port: number): Fetcher; + setInactivityTimeout(durationMs: number | bigint): Promise; + interceptOutboundHttp(addr: string, binding: Fetcher): Promise; + interceptAllOutboundHttp(binding: Fetcher): Promise; + snapshotDirectory(options: 
ContainerDirectorySnapshotOptions): Promise; + snapshotContainer(options: ContainerSnapshotOptions): Promise; + interceptOutboundHttps(addr: string, binding: Fetcher): Promise; +} +interface ContainerDirectorySnapshot { + id: string; + size: number; + dir: string; + name?: string; +} +interface ContainerDirectorySnapshotOptions { + dir: string; + name?: string; +} +interface ContainerDirectorySnapshotRestoreParams { + snapshot: ContainerDirectorySnapshot; + mountPoint?: string; +} +interface ContainerSnapshot { + id: string; + size: number; + name?: string; +} +interface ContainerSnapshotOptions { + name?: string; +} +interface ContainerStartupOptions { + entrypoint?: string[]; + enableInternet: boolean; + env?: Record; + labels?: Record; + directorySnapshots?: ContainerDirectorySnapshotRestoreParams[]; + containerSnapshot?: ContainerSnapshot; +} +/** + * The **`MessagePort`** interface of the Channel Messaging API represents one of the two ports of a MessageChannel, allowing messages to be sent from one port and listening out for them arriving at the other. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort) + */ +declare abstract class MessagePort extends EventTarget { + /** + * The **`postMessage()`** method of the transfers ownership of objects to other browsing contexts. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/postMessage) + */ + postMessage(data?: any, options?: (any[] | MessagePortPostMessageOptions)): void; + /** + * The **`close()`** method of the MessagePort interface disconnects the port, so it is no longer active. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/close) + */ + close(): void; + /** + * The **`start()`** method of the MessagePort interface starts the sending of messages queued on the port. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/start) + */ + start(): void; + get onmessage(): any | null; + set onmessage(value: any | null); +} +/** + * The **`MessageChannel`** interface of the Channel Messaging API allows us to create a new message channel and send data through it via its two MessagePort properties. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel) + */ +declare class MessageChannel { + constructor(); + /** + * The **`port1`** read-only property of the the port attached to the context that originated the channel. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel/port1) + */ + readonly port1: MessagePort; + /** + * The **`port2`** read-only property of the the port attached to the context at the other end of the channel, which the message is initially sent to. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel/port2) + */ + readonly port2: MessagePort; +} +interface MessagePortPostMessageOptions { + transfer?: any[]; +} +type LoopbackForExport Rpc.EntrypointBranded) | ExportedHandler | undefined = undefined> = T extends new (...args: any[]) => Rpc.WorkerEntrypointBranded ? LoopbackServiceStub> : T extends new (...args: any[]) => Rpc.DurableObjectBranded ? LoopbackDurableObjectClass> : T extends ExportedHandler ? LoopbackServiceStub : undefined; +type LoopbackServiceStub = Fetcher & (T extends CloudflareWorkersModule.WorkerEntrypoint ? (opts: { + props?: Props; +}) => Fetcher : (opts: { + props?: any; +}) => Fetcher); +type LoopbackDurableObjectClass = DurableObjectClass & (T extends CloudflareWorkersModule.DurableObject ? 
(opts: { + props?: Props; +}) => DurableObjectClass : (opts: { + props?: any; +}) => DurableObjectClass); +interface LoopbackDurableObjectNamespace extends DurableObjectNamespace { +} +interface LoopbackColoLocalActorNamespace extends ColoLocalActorNamespace { +} +interface SyncKvStorage { + get(key: string): T | undefined; + list(options?: SyncKvListOptions): Iterable<[ + string, + T + ]>; + put(key: string, value: T): void; + delete(key: string): boolean; +} +interface SyncKvListOptions { + start?: string; + startAfter?: string; + end?: string; + prefix?: string; + reverse?: boolean; + limit?: number; +} +interface WorkerStub { + getEntrypoint(name?: string, options?: WorkerStubEntrypointOptions): Fetcher; + getDurableObjectClass(name?: string, options?: WorkerStubEntrypointOptions): DurableObjectClass; +} +interface WorkerStubEntrypointOptions { + props?: any; + limits?: workerdResourceLimits; +} +interface WorkerLoader { + get(name: string | null, getCode: () => WorkerLoaderWorkerCode | Promise): WorkerStub; + load(code: WorkerLoaderWorkerCode): WorkerStub; +} +interface WorkerLoaderModule { + js?: string; + cjs?: string; + text?: string; + data?: ArrayBuffer; + json?: any; + py?: string; + wasm?: ArrayBuffer; +} +interface WorkerLoaderWorkerCode { + compatibilityDate: string; + compatibilityFlags?: string[]; + allowExperimental?: boolean; + limits?: workerdResourceLimits; + mainModule: string; + modules: Record; + env?: any; + globalOutbound?: (Fetcher | null); + tails?: Fetcher[]; + streamingTails?: Fetcher[]; +} +interface workerdResourceLimits { + cpuMs?: number; + subRequests?: number; +} +/** +* The Workers runtime supports a subset of the Performance API, used to measure timing and performance, +* as well as timing of subrequests and other operations. 
+* +* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/) +*/ +declare abstract class Performance { + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancetimeorigin) */ + get timeOrigin(): number; + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancenow) */ + now(): number; + /** + * The **`toJSON()`** method of the Performance interface is a Serialization; it returns a JSON representation of the Performance object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/toJSON) + */ + toJSON(): object; +} +interface Tracing { + enterSpan(name: string, callback: (span: Span, ...args: A) => T, ...args: A): T; + Span: typeof Span; +} +declare abstract class Span { + get isTraced(): boolean; + setAttribute(key: string, value?: (boolean | number | string)): void; +} +// ============ AI Search Error Interfaces ============ +interface AiSearchInternalError extends Error { +} +interface AiSearchNotFoundError extends Error { +} +// ============ AI Search Common Types ============ +/** A single message in a conversation-style search or chat request. */ +type AiSearchMessage = { + role: 'system' | 'developer' | 'user' | 'assistant' | 'tool'; + content: string | null; +}; +/** + * Common shape for `ai_search_options` used by both single-instance and multi-instance requests. + * Contains retrieval, query rewrite, reranking, and cache sub-options. + */ +type AiSearchOptions = { + retrieval?: { + /** Which retrieval backend to use. Defaults to the instance's configured index_method. */ + retrieval_type?: 'vector' | 'keyword' | 'hybrid'; + /** Fusion method for combining vector + keyword results. */ + fusion_method?: 'max' | 'rrf'; + /** How keyword terms are combined: "and" = all terms must match, "or" = any term matches. 
*/ + keyword_match_mode?: 'and' | 'or'; + /** Minimum similarity score (0-1) for a result to be included. Default 0.4. */ + match_threshold?: number; + /** Maximum number of results to return (1-50). Default 10. */ + max_num_results?: number; + /** Vectorize metadata filters applied to the search. */ + filters?: VectorizeVectorMetadataFilter; + /** Number of surrounding chunks to include for context (0-3). Default 0. */ + context_expansion?: number; + /** If true, return only item metadata without chunk text. */ + metadata_only?: boolean; + /** If true (default), return empty results on retrieval failure instead of throwing. */ + return_on_failure?: boolean; + /** Boost results by metadata field values. Max 3 entries. */ + boost_by?: Array<{ + field: string; + direction?: 'asc' | 'desc' | 'exists' | 'not_exists'; + }>; + [key: string]: unknown; + }; + query_rewrite?: { + enabled?: boolean; + model?: string; + rewrite_prompt?: string; + [key: string]: unknown; + }; + reranking?: { + enabled?: boolean; + model?: string; + /** Match threshold (0-1, default 0.4) */ + match_threshold?: number; + [key: string]: unknown; + }; + cache?: { + enabled?: boolean; + cache_threshold?: 'super_strict_match' | 'close_enough' | 'flexible_friend' | 'anything_goes'; + }; + [key: string]: unknown; +}; +// ============ AI Search Request Types ============ +/** + * Request body for single-instance search. + * Exactly one of `query` or `messages` must be provided. + */ +type AiSearchSearchRequest = { + /** Simple query string. */ + query: string; + messages?: never; + ai_search_options?: AiSearchOptions; +} | { + query?: never; + /** Conversation-style input. At least one user message with non-empty content is required. 
*/ + messages: AiSearchMessage[]; + ai_search_options?: AiSearchOptions; +}; +type AiSearchChatCompletionsRequest = { + messages: AiSearchMessage[]; + model?: string; + stream?: boolean; + ai_search_options?: AiSearchOptions; + [key: string]: unknown; +}; +// ============ AI Search Multi-Instance Types (Namespace-Scoped) ============ +/** `ai_search_options` shape for multi-instance requests — requires `instance_ids`. */ +type AiSearchMultiSearchOptions = AiSearchOptions & { + /** Instance IDs to search across (1-10). */ + instance_ids: string[]; +}; +/** + * Request for searching across multiple instances within a namespace. + * `ai_search_options` is required and must include `instance_ids`. + * Exactly one of `query` or `messages` must be provided. + */ +type AiSearchMultiSearchRequest = { + /** Simple query string. */ + query: string; + messages?: never; + ai_search_options: AiSearchMultiSearchOptions; +} | { + query?: never; + /** Conversation-style input. */ + messages: AiSearchMessage[]; + ai_search_options: AiSearchMultiSearchOptions; +}; +/** A search result chunk tagged with the instance it originated from. */ +type AiSearchMultiSearchChunk = AiSearchSearchResponse['chunks'][number] & { + instance_id: string; +}; +/** Describes a per-instance error during a multi-instance operation. */ +type AiSearchMultiSearchError = { + instance_id: string; + message: string; +}; +/** Response from a multi-instance search, with chunks tagged by instance and optional partial-failure errors. */ +type AiSearchMultiSearchResponse = { + search_query: string; + chunks: AiSearchMultiSearchChunk[]; + errors?: AiSearchMultiSearchError[]; +}; +/** Request for chat completions across multiple instances within a namespace. `ai_search_options` is required and must include `instance_ids`. 
*/ +type AiSearchMultiChatCompletionsRequest = Omit & { + ai_search_options: AiSearchMultiSearchOptions; +}; +/** Response from multi-instance chat completions, with chunks tagged by instance and optional partial-failure errors. */ +type AiSearchMultiChatCompletionsResponse = Omit & { + chunks: AiSearchMultiSearchChunk[]; + errors?: AiSearchMultiSearchError[]; +}; +// ============ AI Search Response Types ============ +type AiSearchSearchResponse = { + search_query: string; + chunks: Array<{ + id: string; + type: string; + /** Match score (0-1) */ + score: number; + text: string; + item: { + timestamp?: number; + key: string; + metadata?: Record; + }; + scoring_details?: { + /** Keyword match score (0-1) */ + keyword_score?: number; + /** Vector similarity score (0-1) */ + vector_score?: number; + /** Keyword rank position */ + keyword_rank?: number; + /** Vector rank position */ + vector_rank?: number; + /** Reranking model score */ + reranking_score?: number; + /** Fusion method used to combine results */ + fusion_method?: 'rrf' | 'max'; + [key: string]: unknown; + }; + }>; +}; +type AiSearchChatCompletionsResponse = { + id?: string; + object?: string; + model?: string; + choices: Array<{ + index?: number; + message: { + role: 'system' | 'developer' | 'user' | 'assistant' | 'tool'; + content: string | null; + [key: string]: unknown; + }; + [key: string]: unknown; + }>; + chunks: AiSearchSearchResponse['chunks']; + [key: string]: unknown; +}; +type AiSearchStatsResponse = { + queued?: number; + running?: number; + completed?: number; + error?: number; + skipped?: number; + outdated?: number; + last_activity?: string; + /** Storage engine statistics. 
*/ + engine?: { + vectorize?: { + vectorsCount: number; + dimensions: number; + }; + r2?: { + payloadSizeBytes: number; + metadataSizeBytes: number; + objectCount: number; + }; + }; +}; +// ============ AI Search Instance Info Types ============ +type AiSearchInstanceInfo = { + id: string; + type?: 'r2' | 'web-crawler' | string; + source?: string; + source_params?: unknown; + paused?: boolean; + status?: string; + namespace?: string; + created_at?: string; + modified_at?: string; + token_id?: string; + ai_gateway_id?: string; + rewrite_query?: boolean; + reranking?: boolean; + embedding_model?: string; + ai_search_model?: string; + rewrite_model?: string; + reranking_model?: string; + /** @deprecated Use index_method instead. */ + hybrid_search_enabled?: boolean; + /** Controls which storage backends are active. */ + index_method?: { + vector?: boolean; + keyword?: boolean; + }; + /** Fusion method for combining vector and keyword results. */ + fusion_method?: 'max' | 'rrf'; + indexing_options?: { + keyword_tokenizer?: 'porter' | 'trigram'; + } | null; + retrieval_options?: { + keyword_match_mode?: 'and' | 'or'; + boost_by?: Array<{ + field: string; + direction?: 'asc' | 'desc' | 'exists' | 'not_exists'; + }>; + } | null; + chunk?: boolean; + chunk_size?: number; + chunk_overlap?: number; + score_threshold?: number; + max_num_results?: number; + cache?: boolean; + cache_threshold?: 'super_strict_match' | 'close_enough' | 'flexible_friend' | 'anything_goes'; + custom_metadata?: Array<{ + field_name: string; + data_type: 'text' | 'number' | 'boolean' | 'datetime'; + }>; + /** Sync interval in seconds. */ + sync_interval?: 3600 | 7200 | 14400 | 21600 | 43200 | 86400; + metadata?: Record; + [key: string]: unknown; +}; +/** Pagination, search, and ordering parameters for listing instances within a namespace. */ +type AiSearchListInstancesParams = { + page?: number; + per_page?: number; + /** Search instances by ID. */ + search?: string; + /** Field to sort by. 
*/ + order_by?: 'created_at'; + /** Sort direction. */ + order_by_direction?: 'asc' | 'desc'; +}; +type AiSearchListResponse = { + result: AiSearchInstanceInfo[]; + result_info?: { + count: number; + page: number; + per_page: number; + total_count: number; + }; +}; +// ============ AI Search Config Types ============ +type AiSearchConfig = { + /** Instance ID (1-32 chars, pattern: ^[a-z0-9_]+(?:-[a-z0-9_]+)*$) */ + id: string; + /** Instance type. Omit to create with built-in storage. */ + type?: 'r2' | 'web-crawler' | string; + /** Source URL (required for web-crawler type). */ + source?: string; + source_params?: unknown; + /** Token ID (UUID format) */ + token_id?: string; + ai_gateway_id?: string; + /** Enable query rewriting (default false) */ + rewrite_query?: boolean; + /** Enable reranking (default false) */ + reranking?: boolean; + embedding_model?: string; + ai_search_model?: string; + rewrite_model?: string; + reranking_model?: string; + /** @deprecated Use index_method instead. */ + hybrid_search_enabled?: boolean; + /** Controls which storage backends are used during indexing. Defaults to vector-only. */ + index_method?: { + vector?: boolean; + keyword?: boolean; + }; + /** Fusion method for combining vector and keyword results. "rrf" = reciprocal rank fusion (default), "max" = maximum score. */ + fusion_method?: 'max' | 'rrf'; + indexing_options?: { + keyword_tokenizer?: 'porter' | 'trigram'; + } | null; + retrieval_options?: { + keyword_match_mode?: 'and' | 'or'; + boost_by?: Array<{ + field: string; + direction?: 'asc' | 'desc' | 'exists' | 'not_exists'; + }>; + } | null; + chunk?: boolean; + chunk_size?: number; + chunk_overlap?: number; + /** Minimum similarity score (0-1) for a result to be included. */ + score_threshold?: number; + max_num_results?: number; + cache?: boolean; + /** Similarity threshold for cache hits. Stricter = fewer cache hits but higher relevance. 
*/ + cache_threshold?: 'super_strict_match' | 'close_enough' | 'flexible_friend' | 'anything_goes'; + custom_metadata?: Array<{ + field_name: string; + data_type: 'text' | 'number' | 'boolean' | 'datetime'; + }>; + namespace?: string; + /** Sync interval in seconds. 3600=1h, 7200=2h, 14400=4h, 21600=6h, 43200=12h, 86400=24h. */ + sync_interval?: 3600 | 7200 | 14400 | 21600 | 43200 | 86400; + metadata?: Record; + [key: string]: unknown; +}; +// ============ AI Search Item Types ============ +type AiSearchItemInfo = { + id: string; + key: string; + status: 'completed' | 'error' | 'skipped' | 'queued' | 'running' | 'outdated'; + next_action?: 'INDEX' | 'DELETE' | null; + error?: string; + checksum?: string; + namespace?: string; + chunks_count?: number | null; + file_size?: number | null; + source_id?: string | null; + last_seen_at?: string; + created_at?: string; + metadata?: Record; + [key: string]: unknown; +}; +type AiSearchItemContentResult = { + body: ReadableStream; + contentType: string; + filename: string; + size: number; +}; +type AiSearchUploadItemOptions = { + metadata?: Record; +}; +type AiSearchListItemsParams = { + page?: number; + per_page?: number; + /** Search items by key name. */ + search?: string; + /** Sort order for results. */ + sort_by?: 'status' | 'modified_at'; + /** Filter items by processing status. */ + status?: 'queued' | 'running' | 'completed' | 'error' | 'skipped' | 'outdated'; + /** Filter items by source (e.g. "builtin" or "web-crawler:https://example.com"). */ + source?: string; + /** JSON-encoded Vectorize filter for metadata filtering. */ + metadata_filter?: string; +}; +type AiSearchListItemsResponse = { + result: AiSearchItemInfo[]; + result_info?: { + count: number; + page: number; + per_page: number; + total_count: number; + }; +}; +// ============ AI Search Item Logs Types ============ +type AiSearchItemLogsParams = { + /** Maximum number of log entries to return (1-100, default 50). 
*/ + limit?: number; + /** Opaque cursor for pagination. Pass the `cursor` value from a previous response. */ + cursor?: string; +}; +type AiSearchItemLog = { + timestamp: string; + action: string; + message: string; + fileKey?: string; + chunkCount?: number; + processingTimeMs?: number; + errorType?: string; +}; +/** Paginated response for item processing logs (cursor-based). */ +type AiSearchItemLogsResponse = { + result: AiSearchItemLog[]; + result_info: { + count: number; + per_page: number; + cursor: string | null; + truncated: boolean; + }; +}; +// ============ AI Search Item Chunks Types ============ +type AiSearchItemChunksParams = { + /** Maximum number of chunks to return (1-100, default 20). */ + limit?: number; + /** Offset into the chunks list (default 0). */ + offset?: number; +}; +/** A single indexed chunk belonging to an item, including its text content and byte range. */ +type AiSearchItemChunk = { + id: string; + text: string; + start_byte: number; + end_byte: number; + item?: { + timestamp?: number; + key: string; + metadata?: Record; + }; +}; +/** Paginated response for item chunks (offset-based). 
*/ +type AiSearchItemChunksResponse = { + result: AiSearchItemChunk[]; + result_info: { + count: number; + total: number; + limit: number; + offset: number; + }; +}; +// ============ AI Search Job Types ============ +type AiSearchJobInfo = { + id: string; + source: 'user' | 'schedule'; + description?: string; + last_seen_at?: string; + started_at?: string; + ended_at?: string; + end_reason?: string; +}; +type AiSearchJobLog = { + id: number; + message: string; + message_type: number; + created_at: number; +}; +type AiSearchCreateJobParams = { + description?: string; +}; +type AiSearchListJobsParams = { + page?: number; + per_page?: number; +}; +type AiSearchListJobsResponse = { + result: AiSearchJobInfo[]; + result_info?: { + count: number; + page: number; + per_page: number; + total_count: number; + }; +}; +type AiSearchJobLogsParams = { + page?: number; + per_page?: number; +}; +type AiSearchJobLogsResponse = { + result: AiSearchJobLog[]; + result_info?: { + count: number; + page: number; + per_page: number; + total_count: number; + }; +}; +// ============ AI Search Sub-Service Classes ============ +/** + * Single item service for an AI Search instance. + * Provides info, download, sync, logs, and chunks operations on a specific item. + */ +declare abstract class AiSearchItem { + /** Get metadata about this item. */ + info(): Promise; + /** + * Download the item's content. + * @returns Object with body stream, content type, filename, and size. + */ + download(): Promise; + /** + * Trigger re-indexing of this item. + * @returns The updated item info. + */ + sync(): Promise; + /** + * Retrieve processing logs for this item (cursor-based pagination). + * @param params Optional pagination parameters (limit, cursor). + * @returns Paginated log entries for this item. + */ + logs(params?: AiSearchItemLogsParams): Promise; + /** + * List indexed chunks for this item (offset-based pagination). + * @param params Optional pagination parameters (limit, offset). 
+ * @returns Paginated chunk entries for this item. + */ + chunks(params?: AiSearchItemChunksParams): Promise; +} +/** + * Items collection service for an AI Search instance. + * Provides list, upload, and access to individual items. + */ +declare abstract class AiSearchItems { + /** List items in this instance. */ + list(params?: AiSearchListItemsParams): Promise; + /** + * Upload a file as an item. Behaves as an upsert: if an item with the same + * filename already exists, it is overwritten and re-indexed. + * @param name Filename for the uploaded item. + * @param content File content as a ReadableStream, Blob, or string. + * @param options Optional metadata to attach to the item. + * @returns The created item info. + */ + upload(name: string, content: ReadableStream | Blob | string, options?: AiSearchUploadItemOptions): Promise; + /** + * Upload a file and poll until processing completes. + * Behaves as an upsert: if an item with the same filename already exists, + * it is overwritten and re-indexed. + * @param name Filename for the uploaded item. + * @param content File content as a ReadableStream, Blob, or string. + * @param options Optional metadata and polling configuration. + * @returns The item info after processing completes (or timeout). + */ + uploadAndPoll(name: string, content: ReadableStream | Blob | string, options?: AiSearchUploadItemOptions & { + /** Polling interval in milliseconds (default 1000). */ + pollIntervalMs?: number; + /** Maximum time to wait in milliseconds (default 30000). */ + timeoutMs?: number; + }): Promise; + /** + * Get an item by ID. + * @param itemId The item identifier. + * @returns Item service for info, download, sync, logs, and chunks operations. + */ + get(itemId: string): AiSearchItem; + /** + * Delete an item from the instance. + * @param itemId The item identifier. + */ + delete(itemId: string): Promise; +} +/** + * Single job service for an AI Search instance. 
+ * Provides info, logs, and cancel operations for a specific job. + */ +declare abstract class AiSearchJob { + /** Get metadata about this job. */ + info(): Promise; + /** Get logs for this job. */ + logs(params?: AiSearchJobLogsParams): Promise; + /** + * Cancel a running job. + * @returns The updated job info. + * @throws AiSearchNotFoundError if the job does not exist. + */ + cancel(): Promise; +} +/** + * Jobs collection service for an AI Search instance. + * Provides list, create, and access to individual jobs. + */ +declare abstract class AiSearchJobs { + /** List jobs for this instance. */ + list(params?: AiSearchListJobsParams): Promise; + /** + * Create a new indexing job. + * @param params Optional job parameters. + * @returns The created job info. + */ + create(params?: AiSearchCreateJobParams): Promise; + /** + * Get a job by ID. + * @param jobId The job identifier. + * @returns Job service for info, logs, and cancel operations. + */ + get(jobId: string): AiSearchJob; +} +// ============ AI Search Binding Classes ============ +/** + * Instance-level AI Search service. + * + * Used as: + * - The return type of `AiSearchNamespace.get(name)` (namespace binding) + * - The type of `env.BLOG_SEARCH` (single instance binding via `ai_search`) + * + * Provides search, chat, update, stats, items, and jobs operations. + * + * @example + * ```ts + * // Via namespace binding + * const instance = env.AI_SEARCH.get("blog"); + * const results = await instance.search({ + * query: "How does caching work?", + * }); + * + * // Via single instance binding + * const results = await env.BLOG_SEARCH.search({ + * messages: [{ role: "user", content: "How does caching work?" }], + * }); + * ``` + */ +declare abstract class AiSearchInstance { + /** + * Search the AI Search instance for relevant chunks. + * @param params Search request with query or messages and optional AI search options. + * @returns Search response with matching chunks and search query. 
+ */ + search(params: AiSearchSearchRequest): Promise; + /** + * Generate chat completions with AI Search context (streaming). + * @param params Chat completions request with stream: true. + * @returns ReadableStream of server-sent events. + */ + chatCompletions(params: AiSearchChatCompletionsRequest & { + stream: true; + }): Promise; + /** + * Generate chat completions with AI Search context. + * @param params Chat completions request. + * @returns Chat completion response with choices and RAG chunks. + */ + chatCompletions(params: AiSearchChatCompletionsRequest): Promise; + /** + * Update the instance configuration. + * @param config Partial configuration to update. + * @returns Updated instance info. + */ + update(config: Partial): Promise; + /** Get metadata about this instance. */ + info(): Promise; + /** + * Get instance statistics (item count, indexing status, etc.). + * @returns Statistics with counts per status, last activity time, and engine details. + */ + stats(): Promise; + /** Items collection — list, upload, and manage items in this instance. */ + get items(): AiSearchItems; + /** Jobs collection — list, create, and inspect indexing jobs. */ + get jobs(): AiSearchJobs; +} +/** + * Namespace-level AI Search service. + * + * Used as the type of `env.AI_SEARCH` (namespace binding via `ai_search_namespaces`). + * Scoped to a single namespace. Provides dynamic instance access, creation, deletion, + * and multi-instance search/chat operations. + * + * @example + * ```ts + * // Access an instance within the namespace + * const blog = env.AI_SEARCH.get("blog"); + * const results = await blog.search({ query: "How does caching work?" 
}); + * + * // List all instances in the namespace + * const instances = await env.AI_SEARCH.list(); + * + * // Create a new instance with built-in storage + * const tenant = await env.AI_SEARCH.create({ id: "tenant-123" }); + * + * // Upload items into the instance + * await tenant.items.upload("doc.pdf", fileContent); + * + * // Search across multiple instances + * const multi = await env.AI_SEARCH.search({ + * query: "caching", + * ai_search_options: { instance_ids: ["blog", "docs"] }, + * }); + * + * // Delete an instance + * await env.AI_SEARCH.delete("tenant-123"); + * ``` + */ +declare abstract class AiSearchNamespace { + /** + * Get an instance by name within the bound namespace. + * @param name Instance name. + * @returns Instance service for search, chat, update, stats, items, and jobs. + */ + get(name: string): AiSearchInstance; + /** + * List instances in the bound namespace. + * @param params Optional pagination, search, and ordering parameters. + * @returns Array of instance metadata with pagination info. + */ + list(params?: AiSearchListInstancesParams): Promise; + /** + * Create a new instance within the bound namespace. + * @param config Instance configuration. Only `id` is required — omit `type` and `source` to create with built-in storage. + * @returns Instance service for the newly created instance. + * + * @example + * ```ts + * // Create with built-in storage (upload items manually) + * const instance = await env.AI_SEARCH.create({ id: "my-search" }); + * + * // Create with web crawler source + * const instance = await env.AI_SEARCH.create({ + * id: "docs-search", + * type: "web-crawler", + * source: "https://developers.cloudflare.com", + * }); + * ``` + */ + create(config: AiSearchConfig): Promise; + /** + * Delete an instance from the bound namespace. + * @param name Instance name to delete. + */ + delete(name: string): Promise; + /** + * Search across multiple instances within the bound namespace. 
+ * Fans out to the specified instance_ids and merges results. + * @param params Search request with required `ai_search_options.instance_ids`. + * @returns Search response with chunks tagged by instance_id and optional partial-failure errors. + */ + search(params: AiSearchMultiSearchRequest): Promise; + /** + * Generate chat completions across multiple instances within the bound namespace (streaming). + * Fans out to the specified instance_ids, merges context, and generates a response. + * @param params Chat completions request with stream: true and required `ai_search_options.instance_ids`. + * @returns ReadableStream of server-sent events. + */ + chatCompletions(params: AiSearchMultiChatCompletionsRequest & { + stream: true; + }): Promise; + /** + * Generate chat completions across multiple instances within the bound namespace. + * Fans out to the specified instance_ids, merges context, and generates a response. + * @param params Chat completions request with required `ai_search_options.instance_ids`. + * @returns Chat completion response with choices, chunks tagged by instance_id, and optional partial-failure errors. 
+ */ + chatCompletions(params: AiSearchMultiChatCompletionsRequest): Promise; +} +type AiImageClassificationInput = { + image: number[]; +}; +type AiImageClassificationOutput = { + score?: number; + label?: string; +}[]; +declare abstract class BaseAiImageClassification { + inputs: AiImageClassificationInput; + postProcessedOutputs: AiImageClassificationOutput; +} +type AiImageToTextInput = { + image: number[]; + prompt?: string; + max_tokens?: number; + temperature?: number; + top_p?: number; + top_k?: number; + seed?: number; + repetition_penalty?: number; + frequency_penalty?: number; + presence_penalty?: number; + raw?: boolean; + messages?: RoleScopedChatInput[]; +}; +type AiImageToTextOutput = { + description: string; +}; +declare abstract class BaseAiImageToText { + inputs: AiImageToTextInput; + postProcessedOutputs: AiImageToTextOutput; +} +type AiImageTextToTextInput = { + image: string; + prompt?: string; + max_tokens?: number; + temperature?: number; + ignore_eos?: boolean; + top_p?: number; + top_k?: number; + seed?: number; + repetition_penalty?: number; + frequency_penalty?: number; + presence_penalty?: number; + raw?: boolean; + messages?: RoleScopedChatInput[]; +}; +type AiImageTextToTextOutput = { + description: string; +}; +declare abstract class BaseAiImageTextToText { + inputs: AiImageTextToTextInput; + postProcessedOutputs: AiImageTextToTextOutput; +} +type AiMultimodalEmbeddingsInput = { + image: string; + text: string[]; +}; +type AiIMultimodalEmbeddingsOutput = { + data: number[][]; + shape: number[]; +}; +declare abstract class BaseAiMultimodalEmbeddings { + inputs: AiImageTextToTextInput; + postProcessedOutputs: AiImageTextToTextOutput; +} +type AiObjectDetectionInput = { + image: number[]; +}; +type AiObjectDetectionOutput = { + score?: number; + label?: string; +}[]; +declare abstract class BaseAiObjectDetection { + inputs: AiObjectDetectionInput; + postProcessedOutputs: AiObjectDetectionOutput; +} +type AiSentenceSimilarityInput = { + 
source: string; + sentences: string[]; +}; +type AiSentenceSimilarityOutput = number[]; +declare abstract class BaseAiSentenceSimilarity { + inputs: AiSentenceSimilarityInput; + postProcessedOutputs: AiSentenceSimilarityOutput; +} +type AiAutomaticSpeechRecognitionInput = { + audio: number[]; +}; +type AiAutomaticSpeechRecognitionOutput = { + text?: string; + words?: { + word: string; + start: number; + end: number; + }[]; + vtt?: string; +}; +declare abstract class BaseAiAutomaticSpeechRecognition { + inputs: AiAutomaticSpeechRecognitionInput; + postProcessedOutputs: AiAutomaticSpeechRecognitionOutput; +} +type AiSummarizationInput = { + input_text: string; + max_length?: number; +}; +type AiSummarizationOutput = { + summary: string; +}; +declare abstract class BaseAiSummarization { + inputs: AiSummarizationInput; + postProcessedOutputs: AiSummarizationOutput; +} +type AiTextClassificationInput = { + text: string; +}; +type AiTextClassificationOutput = { + score?: number; + label?: string; +}[]; +declare abstract class BaseAiTextClassification { + inputs: AiTextClassificationInput; + postProcessedOutputs: AiTextClassificationOutput; +} +type AiTextEmbeddingsInput = { + text: string | string[]; +}; +type AiTextEmbeddingsOutput = { + shape: number[]; + data: number[][]; +}; +declare abstract class BaseAiTextEmbeddings { + inputs: AiTextEmbeddingsInput; + postProcessedOutputs: AiTextEmbeddingsOutput; +} +type RoleScopedChatInput = { + role: "user" | "assistant" | "system" | "tool" | (string & NonNullable); + content: string; + name?: string; +}; +type AiTextGenerationToolLegacyInput = { + name: string; + description: string; + parameters?: { + type: "object" | (string & NonNullable); + properties: { + [key: string]: { + type: string; + description?: string; + }; + }; + required: string[]; + }; +}; +type AiTextGenerationToolInput = { + type: "function" | (string & NonNullable); + function: { + name: string; + description: string; + parameters?: { + type: "object" | 
(string & NonNullable); + properties: { + [key: string]: { + type: string; + description?: string; + }; + }; + required: string[]; + }; + }; +}; +type AiTextGenerationFunctionsInput = { + name: string; + code: string; +}; +type AiTextGenerationResponseFormat = { + type: string; + json_schema?: any; +}; +type AiTextGenerationInput = { + prompt?: string; + raw?: boolean; + stream?: boolean; + max_tokens?: number; + temperature?: number; + top_p?: number; + top_k?: number; + seed?: number; + repetition_penalty?: number; + frequency_penalty?: number; + presence_penalty?: number; + messages?: RoleScopedChatInput[]; + response_format?: AiTextGenerationResponseFormat; + tools?: AiTextGenerationToolInput[] | AiTextGenerationToolLegacyInput[] | (object & NonNullable); + functions?: AiTextGenerationFunctionsInput[]; +}; +type AiTextGenerationToolLegacyOutput = { + name: string; + arguments: unknown; +}; +type AiTextGenerationToolOutput = { + id: string; + type: "function"; + function: { + name: string; + arguments: string; + }; +}; +type UsageTags = { + prompt_tokens: number; + completion_tokens: number; + total_tokens: number; +}; +type AiTextGenerationOutput = { + response?: string; + tool_calls?: AiTextGenerationToolLegacyOutput[] & AiTextGenerationToolOutput[]; + usage?: UsageTags; +}; +declare abstract class BaseAiTextGeneration { + inputs: AiTextGenerationInput; + postProcessedOutputs: AiTextGenerationOutput; +} +type AiTextToSpeechInput = { + prompt: string; + lang?: string; +}; +type AiTextToSpeechOutput = Uint8Array | { + audio: string; +}; +declare abstract class BaseAiTextToSpeech { + inputs: AiTextToSpeechInput; + postProcessedOutputs: AiTextToSpeechOutput; +} +type AiTextToImageInput = { + prompt: string; + negative_prompt?: string; + height?: number; + width?: number; + image?: number[]; + image_b64?: string; + mask?: number[]; + num_steps?: number; + strength?: number; + guidance?: number; + seed?: number; +}; +type AiTextToImageOutput = ReadableStream; 
+declare abstract class BaseAiTextToImage { + inputs: AiTextToImageInput; + postProcessedOutputs: AiTextToImageOutput; +} +type AiTranslationInput = { + text: string; + target_lang: string; + source_lang?: string; +}; +type AiTranslationOutput = { + translated_text?: string; +}; +declare abstract class BaseAiTranslation { + inputs: AiTranslationInput; + postProcessedOutputs: AiTranslationOutput; +} +/** + * Workers AI support for OpenAI's Chat Completions API + */ +type ChatCompletionContentPartText = { + type: "text"; + text: string; +}; +type ChatCompletionContentPartImage = { + type: "image_url"; + image_url: { + url: string; + detail?: "auto" | "low" | "high"; + }; +}; +type ChatCompletionContentPartInputAudio = { + type: "input_audio"; + input_audio: { + /** Base64 encoded audio data. */ + data: string; + format: "wav" | "mp3"; + }; +}; +type ChatCompletionContentPartFile = { + type: "file"; + file: { + /** Base64 encoded file data. */ + file_data?: string; + /** The ID of an uploaded file. 
*/ + file_id?: string; + filename?: string; + }; +}; +type ChatCompletionContentPartRefusal = { + type: "refusal"; + refusal: string; +}; +type ChatCompletionContentPart = ChatCompletionContentPartText | ChatCompletionContentPartImage | ChatCompletionContentPartInputAudio | ChatCompletionContentPartFile; +type FunctionDefinition = { + name: string; + description?: string; + parameters?: Record; + strict?: boolean | null; +}; +type ChatCompletionFunctionTool = { + type: "function"; + function: FunctionDefinition; +}; +type ChatCompletionCustomToolGrammarFormat = { + type: "grammar"; + grammar: { + definition: string; + syntax: "lark" | "regex"; + }; +}; +type ChatCompletionCustomToolTextFormat = { + type: "text"; +}; +type ChatCompletionCustomToolFormat = ChatCompletionCustomToolTextFormat | ChatCompletionCustomToolGrammarFormat; +type ChatCompletionCustomTool = { + type: "custom"; + custom: { + name: string; + description?: string; + format?: ChatCompletionCustomToolFormat; + }; +}; +type ChatCompletionTool = ChatCompletionFunctionTool | ChatCompletionCustomTool; +type ChatCompletionMessageFunctionToolCall = { + id: string; + type: "function"; + function: { + name: string; + /** JSON-encoded arguments string. 
*/ + arguments: string; + }; +}; +type ChatCompletionMessageCustomToolCall = { + id: string; + type: "custom"; + custom: { + name: string; + input: string; + }; +}; +type ChatCompletionMessageToolCall = ChatCompletionMessageFunctionToolCall | ChatCompletionMessageCustomToolCall; +type ChatCompletionToolChoiceFunction = { + type: "function"; + function: { + name: string; + }; +}; +type ChatCompletionToolChoiceCustom = { + type: "custom"; + custom: { + name: string; + }; +}; +type ChatCompletionToolChoiceAllowedTools = { + type: "allowed_tools"; + allowed_tools: { + mode: "auto" | "required"; + tools: Array>; + }; +}; +type ChatCompletionToolChoiceOption = "none" | "auto" | "required" | ChatCompletionToolChoiceFunction | ChatCompletionToolChoiceCustom | ChatCompletionToolChoiceAllowedTools; +type DeveloperMessage = { + role: "developer"; + content: string | Array<{ + type: "text"; + text: string; + }>; + name?: string; +}; +type SystemMessage = { + role: "system"; + content: string | Array<{ + type: "text"; + text: string; + }>; + name?: string; +}; +/** + * Permissive merged content part used inside UserMessage arrays. + * + * Cabidela has a limitation where anyOf/oneOf with enum-based discrimination + * inside nested array items does not correctly match different branches for + * different array elements, so the schema uses a single merged object. 
+ */ +type UserMessageContentPart = { + type: "text" | "image_url" | "input_audio" | "file"; + text?: string; + image_url?: { + url?: string; + detail?: "auto" | "low" | "high"; + }; + input_audio?: { + data?: string; + format?: "wav" | "mp3"; + }; + file?: { + file_data?: string; + file_id?: string; + filename?: string; + }; +}; +type UserMessage = { + role: "user"; + content: string | Array; + name?: string; +}; +type AssistantMessageContentPart = { + type: "text" | "refusal"; + text?: string; + refusal?: string; +}; +type AssistantMessage = { + role: "assistant"; + content?: string | null | Array; + refusal?: string | null; + name?: string; + audio?: { + id: string; + }; + tool_calls?: Array; + function_call?: { + name: string; + arguments: string; + }; +}; +type ToolMessage = { + role: "tool"; + content: string | Array<{ + type: "text"; + text: string; + }>; + tool_call_id: string; +}; +type FunctionMessage = { + role: "function"; + content: string; + name: string; +}; +type ChatCompletionMessageParam = DeveloperMessage | SystemMessage | UserMessage | AssistantMessage | ToolMessage | FunctionMessage; +type ChatCompletionsResponseFormatText = { + type: "text"; +}; +type ChatCompletionsResponseFormatJSONObject = { + type: "json_object"; +}; +type ResponseFormatJSONSchema = { + type: "json_schema"; + json_schema: { + name: string; + description?: string; + schema?: Record; + strict?: boolean | null; + }; +}; +type ResponseFormat = ChatCompletionsResponseFormatText | ChatCompletionsResponseFormatJSONObject | ResponseFormatJSONSchema; +type ChatCompletionsStreamOptions = { + include_usage?: boolean; + include_obfuscation?: boolean; +}; +type PredictionContent = { + type: "content"; + content: string | Array<{ + type: "text"; + text: string; + }>; +}; +type AudioParams = { + voice: string | { + id: string; + }; + format: "wav" | "aac" | "mp3" | "flac" | "opus" | "pcm16"; +}; +type WebSearchUserLocation = { + type: "approximate"; + approximate: { + city?: string; + 
country?: string; + region?: string; + timezone?: string; + }; +}; +type WebSearchOptions = { + search_context_size?: "low" | "medium" | "high"; + user_location?: WebSearchUserLocation; +}; +type ChatTemplateKwargs = { + /** Whether to enable reasoning, enabled by default. */ + enable_thinking?: boolean; + /** If false, preserves reasoning context between turns. */ + clear_thinking?: boolean; +}; +/** Shared optional properties used by both Prompt and Messages input branches. */ +type ChatCompletionsCommonOptions = { + model?: string; + audio?: AudioParams; + frequency_penalty?: number | null; + logit_bias?: Record | null; + logprobs?: boolean | null; + top_logprobs?: number | null; + max_tokens?: number | null; + max_completion_tokens?: number | null; + metadata?: Record | null; + modalities?: Array<"text" | "audio"> | null; + n?: number | null; + parallel_tool_calls?: boolean; + prediction?: PredictionContent; + presence_penalty?: number | null; + reasoning_effort?: "low" | "medium" | "high" | null; + chat_template_kwargs?: ChatTemplateKwargs; + response_format?: ResponseFormat; + seed?: number | null; + service_tier?: "auto" | "default" | "flex" | "scale" | "priority" | null; + stop?: string | Array | null; + store?: boolean | null; + stream?: boolean | null; + stream_options?: ChatCompletionsStreamOptions; + temperature?: number | null; + tool_choice?: ChatCompletionToolChoiceOption; + tools?: Array; + top_p?: number | null; + user?: string; + web_search_options?: WebSearchOptions; + function_call?: "none" | "auto" | { + name: string; + }; + functions?: Array; +}; +type PromptTokensDetails = { + cached_tokens?: number; + audio_tokens?: number; +}; +type CompletionTokensDetails = { + reasoning_tokens?: number; + audio_tokens?: number; + accepted_prediction_tokens?: number; + rejected_prediction_tokens?: number; +}; +type CompletionUsage = { + prompt_tokens: number; + completion_tokens: number; + total_tokens: number; + prompt_tokens_details?: 
PromptTokensDetails; + completion_tokens_details?: CompletionTokensDetails; +}; +type ChatCompletionTopLogprob = { + token: string; + logprob: number; + bytes: Array | null; +}; +type ChatCompletionTokenLogprob = { + token: string; + logprob: number; + bytes: Array | null; + top_logprobs: Array; +}; +type ChatCompletionAudio = { + id: string; + /** Base64 encoded audio bytes. */ + data: string; + expires_at: number; + transcript: string; +}; +type ChatCompletionUrlCitation = { + type: "url_citation"; + url_citation: { + url: string; + title: string; + start_index: number; + end_index: number; + }; +}; +type ChatCompletionResponseMessage = { + role: "assistant"; + content: string | null; + refusal: string | null; + annotations?: Array; + audio?: ChatCompletionAudio; + tool_calls?: Array; + function_call?: { + name: string; + arguments: string; + } | null; +}; +type ChatCompletionLogprobs = { + content: Array | null; + refusal?: Array | null; +}; +type ChatCompletionChoice = { + index: number; + message: ChatCompletionResponseMessage; + finish_reason: "stop" | "length" | "tool_calls" | "content_filter" | "function_call"; + logprobs: ChatCompletionLogprobs | null; +}; +type ChatCompletionsPromptInput = { + prompt: string; +} & ChatCompletionsCommonOptions; +type ChatCompletionsMessagesInput = { + messages: Array; +} & ChatCompletionsCommonOptions; +type ChatCompletionsOutput = { + id: string; + object: string; + created: number; + model: string; + choices: Array; + usage?: CompletionUsage; + system_fingerprint?: string | null; + service_tier?: "auto" | "default" | "flex" | "scale" | "priority" | null; +}; +/** + * Workers AI support for OpenAI's Responses API + * Reference: https://github.com/openai/openai-node/blob/master/src/resources/responses/responses.ts + * + * It's a stripped down version from its source. + * It currently supports basic function calling, json mode and accepts images as input. 
+ * + * It does not include types for WebSearch, CodeInterpreter, FileInputs, MCP, CustomTools. + * We plan to add those incrementally as model + platform capabilities evolve. + */ +type ResponsesInput = { + background?: boolean | null; + conversation?: string | ResponseConversationParam | null; + include?: Array | null; + input?: string | ResponseInput; + instructions?: string | null; + max_output_tokens?: number | null; + parallel_tool_calls?: boolean | null; + previous_response_id?: string | null; + prompt_cache_key?: string; + reasoning?: Reasoning | null; + safety_identifier?: string; + service_tier?: "auto" | "default" | "flex" | "scale" | "priority" | null; + stream?: boolean | null; + stream_options?: StreamOptions | null; + temperature?: number | null; + text?: ResponseTextConfig; + tool_choice?: ToolChoiceOptions | ToolChoiceFunction; + tools?: Array; + top_p?: number | null; + truncation?: "auto" | "disabled" | null; +}; +type ResponsesOutput = { + id?: string; + created_at?: number; + output_text?: string; + error?: ResponseError | null; + incomplete_details?: ResponseIncompleteDetails | null; + instructions?: string | Array | null; + object?: "response"; + output?: Array; + parallel_tool_calls?: boolean; + temperature?: number | null; + tool_choice?: ToolChoiceOptions | ToolChoiceFunction; + tools?: Array; + top_p?: number | null; + max_output_tokens?: number | null; + previous_response_id?: string | null; + prompt?: ResponsePrompt | null; + reasoning?: Reasoning | null; + safety_identifier?: string; + service_tier?: "auto" | "default" | "flex" | "scale" | "priority" | null; + status?: ResponseStatus; + text?: ResponseTextConfig; + truncation?: "auto" | "disabled" | null; + usage?: ResponseUsage; +}; +type EasyInputMessage = { + content: string | ResponseInputMessageContentList; + role: "user" | "assistant" | "system" | "developer"; + type?: "message"; +}; +type ResponsesFunctionTool = { + name: string; + parameters: { + [key: string]: unknown; + } | 
null; + strict: boolean | null; + type: "function"; + description?: string | null; +}; +type ResponseIncompleteDetails = { + reason?: "max_output_tokens" | "content_filter"; +}; +type ResponsePrompt = { + id: string; + variables?: { + [key: string]: string | ResponseInputText | ResponseInputImage; + } | null; + version?: string | null; +}; +type Reasoning = { + effort?: ReasoningEffort | null; + generate_summary?: "auto" | "concise" | "detailed" | null; + summary?: "auto" | "concise" | "detailed" | null; +}; +type ResponseContent = ResponseInputText | ResponseInputImage | ResponseOutputText | ResponseOutputRefusal | ResponseContentReasoningText; +type ResponseContentReasoningText = { + text: string; + type: "reasoning_text"; +}; +type ResponseConversationParam = { + id: string; +}; +type ResponseCreatedEvent = { + response: Response; + sequence_number: number; + type: "response.created"; +}; +type ResponseCustomToolCallOutput = { + call_id: string; + output: string | Array; + type: "custom_tool_call_output"; + id?: string; +}; +type ResponseError = { + code: "server_error" | "rate_limit_exceeded" | "invalid_prompt" | "vector_store_timeout" | "invalid_image" | "invalid_image_format" | "invalid_base64_image" | "invalid_image_url" | "image_too_large" | "image_too_small" | "image_parse_error" | "image_content_policy_violation" | "invalid_image_mode" | "image_file_too_large" | "unsupported_image_media_type" | "empty_image_file" | "failed_to_download_image" | "image_file_not_found"; + message: string; +}; +type ResponseErrorEvent = { + code: string | null; + message: string; + param: string | null; + sequence_number: number; + type: "error"; +}; +type ResponseFailedEvent = { + response: Response; + sequence_number: number; + type: "response.failed"; +}; +type ResponseFormatText = { + type: "text"; +}; +type ResponseFormatJSONObject = { + type: "json_object"; +}; +type ResponseFormatTextConfig = ResponseFormatText | ResponseFormatTextJSONSchemaConfig | 
ResponseFormatJSONObject; +type ResponseFormatTextJSONSchemaConfig = { + name: string; + schema: { + [key: string]: unknown; + }; + type: "json_schema"; + description?: string; + strict?: boolean | null; +}; +type ResponseFunctionCallArgumentsDeltaEvent = { + delta: string; + item_id: string; + output_index: number; + sequence_number: number; + type: "response.function_call_arguments.delta"; +}; +type ResponseFunctionCallArgumentsDoneEvent = { + arguments: string; + item_id: string; + name: string; + output_index: number; + sequence_number: number; + type: "response.function_call_arguments.done"; +}; +type ResponseFunctionCallOutputItem = ResponseInputTextContent | ResponseInputImageContent; +type ResponseFunctionCallOutputItemList = Array; +type ResponseFunctionToolCall = { + arguments: string; + call_id: string; + name: string; + type: "function_call"; + id?: string; + status?: "in_progress" | "completed" | "incomplete"; +}; +interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall { + id: string; +} +type ResponseFunctionToolCallOutputItem = { + id: string; + call_id: string; + output: string | Array; + type: "function_call_output"; + status?: "in_progress" | "completed" | "incomplete"; +}; +type ResponseIncludable = "message.input_image.image_url" | "message.output_text.logprobs"; +type ResponseIncompleteEvent = { + response: Response; + sequence_number: number; + type: "response.incomplete"; +}; +type ResponseInput = Array; +type ResponseInputContent = ResponseInputText | ResponseInputImage; +type ResponseInputImage = { + detail: "low" | "high" | "auto"; + type: "input_image"; + /** + * Base64 encoded image + */ + image_url?: string | null; +}; +type ResponseInputImageContent = { + type: "input_image"; + detail?: "low" | "high" | "auto" | null; + /** + * Base64 encoded image + */ + image_url?: string | null; +}; +type ResponseInputItem = EasyInputMessage | ResponseInputItemMessage | ResponseOutputMessage | ResponseFunctionToolCall | 
ResponseInputItemFunctionCallOutput | ResponseReasoningItem; +type ResponseInputItemFunctionCallOutput = { + call_id: string; + output: string | ResponseFunctionCallOutputItemList; + type: "function_call_output"; + id?: string | null; + status?: "in_progress" | "completed" | "incomplete" | null; +}; +type ResponseInputItemMessage = { + content: ResponseInputMessageContentList; + role: "user" | "system" | "developer"; + status?: "in_progress" | "completed" | "incomplete"; + type?: "message"; +}; +type ResponseInputMessageContentList = Array; +type ResponseInputMessageItem = { + id: string; + content: ResponseInputMessageContentList; + role: "user" | "system" | "developer"; + status?: "in_progress" | "completed" | "incomplete"; + type?: "message"; +}; +type ResponseInputText = { + text: string; + type: "input_text"; +}; +type ResponseInputTextContent = { + text: string; + type: "input_text"; +}; +type ResponseItem = ResponseInputMessageItem | ResponseOutputMessage | ResponseFunctionToolCallItem | ResponseFunctionToolCallOutputItem; +type ResponseOutputItem = ResponseOutputMessage | ResponseFunctionToolCall | ResponseReasoningItem; +type ResponseOutputItemAddedEvent = { + item: ResponseOutputItem; + output_index: number; + sequence_number: number; + type: "response.output_item.added"; +}; +type ResponseOutputItemDoneEvent = { + item: ResponseOutputItem; + output_index: number; + sequence_number: number; + type: "response.output_item.done"; +}; +type ResponseOutputMessage = { + id: string; + content: Array; + role: "assistant"; + status: "in_progress" | "completed" | "incomplete"; + type: "message"; +}; +type ResponseOutputRefusal = { + refusal: string; + type: "refusal"; +}; +type ResponseOutputText = { + text: string; + type: "output_text"; + logprobs?: Array; +}; +type ResponseReasoningItem = { + id: string; + summary: Array; + type: "reasoning"; + content?: Array; + encrypted_content?: string | null; + status?: "in_progress" | "completed" | "incomplete"; +}; +type 
ResponseReasoningSummaryItem = { + text: string; + type: "summary_text"; +}; +type ResponseReasoningContentItem = { + text: string; + type: "reasoning_text"; +}; +type ResponseReasoningTextDeltaEvent = { + content_index: number; + delta: string; + item_id: string; + output_index: number; + sequence_number: number; + type: "response.reasoning_text.delta"; +}; +type ResponseReasoningTextDoneEvent = { + content_index: number; + item_id: string; + output_index: number; + sequence_number: number; + text: string; + type: "response.reasoning_text.done"; +}; +type ResponseRefusalDeltaEvent = { + content_index: number; + delta: string; + item_id: string; + output_index: number; + sequence_number: number; + type: "response.refusal.delta"; +}; +type ResponseRefusalDoneEvent = { + content_index: number; + item_id: string; + output_index: number; + refusal: string; + sequence_number: number; + type: "response.refusal.done"; +}; +type ResponseStatus = "completed" | "failed" | "in_progress" | "cancelled" | "queued" | "incomplete"; +type ResponseStreamEvent = ResponseCompletedEvent | ResponseCreatedEvent | ResponseErrorEvent | ResponseFunctionCallArgumentsDeltaEvent | ResponseFunctionCallArgumentsDoneEvent | ResponseFailedEvent | ResponseIncompleteEvent | ResponseOutputItemAddedEvent | ResponseOutputItemDoneEvent | ResponseReasoningTextDeltaEvent | ResponseReasoningTextDoneEvent | ResponseRefusalDeltaEvent | ResponseRefusalDoneEvent | ResponseTextDeltaEvent | ResponseTextDoneEvent; +type ResponseCompletedEvent = { + response: Response; + sequence_number: number; + type: "response.completed"; +}; +type ResponseTextConfig = { + format?: ResponseFormatTextConfig; + verbosity?: "low" | "medium" | "high" | null; +}; +type ResponseTextDeltaEvent = { + content_index: number; + delta: string; + item_id: string; + logprobs: Array; + output_index: number; + sequence_number: number; + type: "response.output_text.delta"; +}; +type ResponseTextDoneEvent = { + content_index: number; + item_id: 
string; + logprobs: Array; + output_index: number; + sequence_number: number; + text: string; + type: "response.output_text.done"; +}; +type Logprob = { + token: string; + logprob: number; + top_logprobs?: Array; +}; +type TopLogprob = { + token?: string; + logprob?: number; +}; +type ResponseUsage = { + input_tokens: number; + output_tokens: number; + total_tokens: number; +}; +type Tool = ResponsesFunctionTool; +type ToolChoiceFunction = { + name: string; + type: "function"; +}; +type ToolChoiceOptions = "none"; +type ReasoningEffort = "minimal" | "low" | "medium" | "high" | null; +type StreamOptions = { + include_obfuscation?: boolean; +}; +/** Marks keys from T that aren't in U as optional never */ +type Without = { + [P in Exclude]?: never; +}; +/** Either T or U, but not both (mutually exclusive) */ +type XOR = (T & Without) | (U & Without); +type Ai_Cf_Baai_Bge_Base_En_V1_5_Input = { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; +} | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. 
+ */ + pooling?: "mean" | "cls"; + }[]; +}; +type Ai_Cf_Baai_Bge_Base_En_V1_5_Output = { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: "mean" | "cls"; +} | Ai_Cf_Baai_Bge_Base_En_V1_5_AsyncResponse; +interface Ai_Cf_Baai_Bge_Base_En_V1_5_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Base_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Output; +} +type Ai_Cf_Openai_Whisper_Input = string | { + /** + * An array of integers that represent the audio data constrained to 8-bit unsigned integer values + */ + audio: number[]; +}; +interface Ai_Cf_Openai_Whisper_Output { + /** + * The transcription + */ + text: string; + word_count?: number; + words?: { + word?: string; + /** + * The second this word begins in the recording + */ + start?: number; + /** + * The ending second when the word completes + */ + end?: number; + }[]; + vtt?: string; +} +declare abstract class Base_Ai_Cf_Openai_Whisper { + inputs: Ai_Cf_Openai_Whisper_Input; + postProcessedOutputs: Ai_Cf_Openai_Whisper_Output; +} +type Ai_Cf_Meta_M2M100_1_2B_Input = { + /** + * The text to be translated + */ + text: string; + /** + * The language code of the source text (e.g., 'en' for English). Defaults to 'en' if not specified + */ + source_lang?: string; + /** + * The language code to translate the text into (e.g., 'es' for Spanish) + */ + target_lang: string; +} | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + /** + * The text to be translated + */ + text: string; + /** + * The language code of the source text (e.g., 'en' for English). 
Defaults to 'en' if not specified + */ + source_lang?: string; + /** + * The language code to translate the text into (e.g., 'es' for Spanish) + */ + target_lang: string; + }[]; +}; +type Ai_Cf_Meta_M2M100_1_2B_Output = { + /** + * The translated text in the target language + */ + translated_text?: string; +} | Ai_Cf_Meta_M2M100_1_2B_AsyncResponse; +interface Ai_Cf_Meta_M2M100_1_2B_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Meta_M2M100_1_2B { + inputs: Ai_Cf_Meta_M2M100_1_2B_Input; + postProcessedOutputs: Ai_Cf_Meta_M2M100_1_2B_Output; +} +type Ai_Cf_Baai_Bge_Small_En_V1_5_Input = { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; +} | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; + }[]; +}; +type Ai_Cf_Baai_Bge_Small_En_V1_5_Output = { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. 
+ */ + pooling?: "mean" | "cls"; +} | Ai_Cf_Baai_Bge_Small_En_V1_5_AsyncResponse; +interface Ai_Cf_Baai_Bge_Small_En_V1_5_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Small_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Output; +} +type Ai_Cf_Baai_Bge_Large_En_V1_5_Input = { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; +} | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: "mean" | "cls"; + }[]; +}; +type Ai_Cf_Baai_Bge_Large_En_V1_5_Output = { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: "mean" | "cls"; +} | Ai_Cf_Baai_Bge_Large_En_V1_5_AsyncResponse; +interface Ai_Cf_Baai_Bge_Large_En_V1_5_AsyncResponse { + /** + * The async request id that can be used to obtain the results. 
+ */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Large_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Output; +} +type Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input = string | { + /** + * The input text prompt for the model to generate a response. + */ + prompt?: string; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * Controls the creativity of the AI's responses by adjusting how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; + image: number[] | (string & NonNullable); + /** + * The maximum number of tokens to generate in the response. 
+ */ + max_tokens?: number; +}; +interface Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Output { + description?: string; +} +declare abstract class Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M { + inputs: Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input; + postProcessedOutputs: Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Output; +} +type Ai_Cf_Openai_Whisper_Tiny_En_Input = string | { + /** + * An array of integers that represent the audio data constrained to 8-bit unsigned integer values + */ + audio: number[]; +}; +interface Ai_Cf_Openai_Whisper_Tiny_En_Output { + /** + * The transcription + */ + text: string; + word_count?: number; + words?: { + word?: string; + /** + * The second this word begins in the recording + */ + start?: number; + /** + * The ending second when the word completes + */ + end?: number; + }[]; + vtt?: string; +} +declare abstract class Base_Ai_Cf_Openai_Whisper_Tiny_En { + inputs: Ai_Cf_Openai_Whisper_Tiny_En_Input; + postProcessedOutputs: Ai_Cf_Openai_Whisper_Tiny_En_Output; +} +interface Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input { + audio: string | { + body?: object; + contentType?: string; + }; + /** + * Supported tasks are 'translate' or 'transcribe'. + */ + task?: string; + /** + * The language of the audio being transcribed or translated. + */ + language?: string; + /** + * Preprocess the audio with a voice activity detection model. + */ + vad_filter?: boolean; + /** + * A text prompt to help provide context to the model on the contents of the audio. + */ + initial_prompt?: string; + /** + * The prefix appended to the beginning of the output of the transcription and can guide the transcription result. + */ + prefix?: string; + /** + * The number of beams to use in beam search decoding. Higher values may improve accuracy at the cost of speed. + */ + beam_size?: number; + /** + * Whether to condition on previous text during transcription. Setting to false may help prevent hallucination loops. 
+ */ + condition_on_previous_text?: boolean; + /** + * Threshold for detecting no-speech segments. Segments with no-speech probability above this value are skipped. + */ + no_speech_threshold?: number; + /** + * Threshold for filtering out segments with high compression ratio, which often indicate repetitive or hallucinated text. + */ + compression_ratio_threshold?: number; + /** + * Threshold for filtering out segments with low average log probability, indicating low confidence. + */ + log_prob_threshold?: number; + /** + * Optional threshold (in seconds) to skip silent periods that may cause hallucinations. + */ + hallucination_silence_threshold?: number; +} +interface Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output { + transcription_info?: { + /** + * The language of the audio being transcribed or translated. + */ + language?: string; + /** + * The confidence level or probability of the detected language being accurate, represented as a decimal between 0 and 1. + */ + language_probability?: number; + /** + * The total duration of the original audio file, in seconds. + */ + duration?: number; + /** + * The duration of the audio after applying Voice Activity Detection (VAD) to remove silent or irrelevant sections, in seconds. + */ + duration_after_vad?: number; + }; + /** + * The complete transcription of the audio. + */ + text: string; + /** + * The total number of words in the transcription. + */ + word_count?: number; + segments?: { + /** + * The starting time of the segment within the audio, in seconds. + */ + start?: number; + /** + * The ending time of the segment within the audio, in seconds. + */ + end?: number; + /** + * The transcription of the segment. + */ + text?: string; + /** + * The temperature used in the decoding process, controlling randomness in predictions. Lower values result in more deterministic outputs. 
+ */ + temperature?: number; + /** + * The average log probability of the predictions for the words in this segment, indicating overall confidence. + */ + avg_logprob?: number; + /** + * The compression ratio of the input to the output, measuring how much the text was compressed during the transcription process. + */ + compression_ratio?: number; + /** + * The probability that the segment contains no speech, represented as a decimal between 0 and 1. + */ + no_speech_prob?: number; + words?: { + /** + * The individual word transcribed from the audio. + */ + word?: string; + /** + * The starting time of the word within the audio, in seconds. + */ + start?: number; + /** + * The ending time of the word within the audio, in seconds. + */ + end?: number; + }[]; + }[]; + /** + * The transcription in WebVTT format, which includes timing and text information for use in subtitles. + */ + vtt?: string; +} +declare abstract class Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo { + inputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input; + postProcessedOutputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output; +} +type Ai_Cf_Baai_Bge_M3_Input = Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts | Ai_Cf_Baai_Bge_M3_Input_Embedding | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: (Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts_1 | Ai_Cf_Baai_Bge_M3_Input_Embedding_1)[]; +}; +interface Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts { + /** + * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts + */ + query?: string; + /** + * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + */ + contexts: { + /** + * One of the provided context content + */ + text?: string; + }[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? 
+ */ + truncate_inputs?: boolean; +} +interface Ai_Cf_Baai_Bge_M3_Input_Embedding { + text: string | string[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? + */ + truncate_inputs?: boolean; +} +interface Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts_1 { + /** + * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts + */ + query?: string; + /** + * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + */ + contexts: { + /** + * One of the provided context content + */ + text?: string; + }[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? + */ + truncate_inputs?: boolean; +} +interface Ai_Cf_Baai_Bge_M3_Input_Embedding_1 { + text: string | string[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? + */ + truncate_inputs?: boolean; +} +type Ai_Cf_Baai_Bge_M3_Output = Ai_Cf_Baai_Bge_M3_Output_Query | Ai_Cf_Baai_Bge_M3_Output_EmbeddingFor_Contexts | Ai_Cf_Baai_Bge_M3_Output_Embedding | Ai_Cf_Baai_Bge_M3_AsyncResponse; +interface Ai_Cf_Baai_Bge_M3_Output_Query { + response?: { + /** + * Index of the context in the request + */ + id?: number; + /** + * Score of the context under the index. + */ + score?: number; + }[]; +} +interface Ai_Cf_Baai_Bge_M3_Output_EmbeddingFor_Contexts { + response?: number[][]; + shape?: number[]; + /** + * The pooling method used in the embedding process. + */ + pooling?: "mean" | "cls"; +} +interface Ai_Cf_Baai_Bge_M3_Output_Embedding { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: "mean" | "cls"; +} +interface Ai_Cf_Baai_Bge_M3_AsyncResponse { + /** + * The async request id that can be used to obtain the results. 
+ */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_M3 { + inputs: Ai_Cf_Baai_Bge_M3_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_M3_Output; +} +interface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input { + /** + * A text description of the image you want to generate. + */ + prompt: string; + /** + * The number of diffusion steps; higher values can improve quality but take longer. + */ + steps?: number; +} +interface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Output { + /** + * The generated image in Base64 format. + */ + image?: string; +} +declare abstract class Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell { + inputs: Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input; + postProcessedOutputs: Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Output; +} +type Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Input = Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Prompt | Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Messages; +interface Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + image?: number[] | (string & NonNullable); + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; +} +interface Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + image?: number[] | (string & NonNullable); + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. 
+ */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + /** + * If true, the response will be streamed back incrementally. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Controls the creativity of the AI's responses by adjusting how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. 
+ */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +type Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output = { + /** + * The generated text response from the model + */ + response?: string; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct { + inputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output; +} +type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input = Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Async_Batch; +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. 
+ */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + content: string | { + /** + * Type of the content (text) + */ + type?: string; + /** + * Text content + */ + text?: string; + }[]; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. 
+ */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_1; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_1 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Async_Batch { + requests?: { + /** + * User-supplied reference. This field will be present in the response as well it can be used to reference the request and response. It's NOT validated to be unique. + */ + external_reference?: string; + /** + * Prompt for the text generation model + */ + prompt?: string; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. 
+ */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; + response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_2; + }[]; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_2 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +} | string | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_AsyncResponse; +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast { + inputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output; +} +interface Ai_Cf_Meta_Llama_Guard_3_8B_Input { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender must alternate between 'user' and 'assistant'. + */ + role: "user" | "assistant"; + /** + * The content of the message as a string. + */ + content: string; + }[]; + /** + * The maximum number of tokens to generate in the response. 
+ */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Dictate the output format of the generated response. + */ + response_format?: { + /** + * Set to json_object to process and output generated text as JSON. + */ + type?: string; + }; +} +interface Ai_Cf_Meta_Llama_Guard_3_8B_Output { + response?: string | { + /** + * Whether the conversation is safe or not. + */ + safe?: boolean; + /** + * A list of what hazard categories predicted for the conversation, if the conversation is deemed unsafe. + */ + categories?: string[]; + }; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; +} +declare abstract class Base_Ai_Cf_Meta_Llama_Guard_3_8B { + inputs: Ai_Cf_Meta_Llama_Guard_3_8B_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_Guard_3_8B_Output; +} +interface Ai_Cf_Baai_Bge_Reranker_Base_Input { + /** + * A query you wish to perform against the provided contexts. + */ + /** + * Number of returned results starting with the best score. + */ + top_k?: number; + /** + * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + */ + contexts: { + /** + * One of the provided context content + */ + text?: string; + }[]; +} +interface Ai_Cf_Baai_Bge_Reranker_Base_Output { + response?: { + /** + * Index of the context in the request + */ + id?: number; + /** + * Score of the context under the index. 
+ */ + score?: number; + }[]; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Reranker_Base { + inputs: Ai_Cf_Baai_Bge_Reranker_Base_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Reranker_Base_Output; +} +type Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input = Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Prompt | Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Messages; +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + /** + * The content of the message as a string. + */ + content: string; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. 
+ */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode_1; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode_1 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +type Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct { + inputs: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output; +} +type Ai_Cf_Qwen_Qwq_32B_Input = Ai_Cf_Qwen_Qwq_32B_Prompt | Ai_Cf_Qwen_Qwq_32B_Messages; +interface Ai_Cf_Qwen_Qwq_32B_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. 
Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwq_32B_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. 
+ */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. 
+ */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +type Ai_Cf_Qwen_Qwq_32B_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Qwen_Qwq_32B { + inputs: Ai_Cf_Qwen_Qwq_32B_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwq_32B_Output; +} +type Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input = Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Prompt | Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Messages; +interface Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. 
+ */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). 
HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + /** + * JSON schema that should be fufilled for the response. 
+ */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +type Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct { + inputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input; + postProcessedOutputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output; +} +type Ai_Cf_Google_Gemma_3_12B_It_Input = Ai_Cf_Google_Gemma_3_12B_It_Prompt | Ai_Cf_Google_Gemma_3_12B_It_Messages; +interface Ai_Cf_Google_Gemma_3_12B_It_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. 
Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Google_Gemma_3_12B_It_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[]; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. 
+ */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. 
+ */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +type Ai_Cf_Google_Gemma_3_12B_It_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Google_Gemma_3_12B_It { + inputs: Ai_Cf_Google_Gemma_3_12B_It_Input; + postProcessedOutputs: Ai_Cf_Google_Gemma_3_12B_It_Output; +} +type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input = Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Async_Batch; +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. 
+ */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). 
HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. 
+ */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Async_Batch { + requests: (Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt_Inner | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages_Inner)[]; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt_Inner { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. 
+ */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages_Inner { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: string | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). 
HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; + /** + * JSON schema that should be fufilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. 
+ */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The tool call id. + */ + id?: string; + /** + * Specifies the type of tool (e.g., 'function'). + */ + type?: string; + /** + * Details of the function tool. 
+ */ + function?: { + /** + * The name of the tool to be called + */ + name?: string; + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + }; + }[]; +}; +declare abstract class Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct { + inputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output; +} +type Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Input = Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Async_Batch; +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. 
+ */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + content: string | { + /** + * Type of the content (text) + */ + type?: string; + /** + * Text content + */ + text?: string; + }[]; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). 
+ */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_1; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_1 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Async_Batch { + requests: (Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt_1 | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages_1)[]; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt_1 { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_2; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_2 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages_1 { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + content: string | { + /** + * Type of the content (text) + */ + type?: string; + /** + * Text content + */ + text?: string; + }[]; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. 
+ */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_3; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_3 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +type Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Output = Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Chat_Completion_Response | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Text_Completion_Response | string | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_AsyncResponse; +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Chat_Completion_Response { + /** + * Unique identifier for the completion + */ + id?: string; + /** + * Object type identifier + */ + object?: "chat.completion"; + /** + * Unix timestamp of when the completion was created + */ + created?: number; + /** + * Model used for the completion + */ + model?: string; + /** + * List of completion choices + */ + choices?: { + /** + * Index of the choice in the list + */ + index?: number; + /** + * The message generated by the model + */ + message?: { + /** + * Role of the message author + */ + role: string; + /** + * The content of the message + */ + content: string; + /** + * Internal reasoning content (if available) + */ + reasoning_content?: string; + /** + * Tool calls made by the assistant + */ + tool_calls?: { + /** + * Unique identifier for the tool call + */ + id: string; + /** + * Type of tool call + */ + type: "function"; + function: { + /** + * Name of the function to call + */ + name: string; + /** + * JSON string of arguments for the function + */ + arguments: string; + }; + }[]; + }; + /** + * Reason why the model stopped generating + */ + finish_reason?: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + }[]; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; 
+ /** + * Log probabilities for the prompt (if requested) + */ + prompt_logprobs?: {} | null; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Text_Completion_Response { + /** + * Unique identifier for the completion + */ + id?: string; + /** + * Object type identifier + */ + object?: "text_completion"; + /** + * Unix timestamp of when the completion was created + */ + created?: number; + /** + * Model used for the completion + */ + model?: string; + /** + * List of completion choices + */ + choices?: { + /** + * Index of the choice in the list + */ + index: number; + /** + * The generated text completion + */ + text: string; + /** + * Reason why the model stopped generating + */ + finish_reason: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + /** + * Log probabilities for the prompt (if requested) + */ + prompt_logprobs?: {} | null; + }[]; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8 { + inputs: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Output; +} +interface Ai_Cf_Deepgram_Nova_3_Input { + audio: { + body: object; + contentType: string; + }; + /** + * Sets how the model will interpret strings submitted to the custom_topic param. When strict, the model will only return topics submitted using the custom_topic param. When extended, the model will return its own detected topics in addition to those submitted using the custom_topic param. 
+ */ + custom_topic_mode?: "extended" | "strict"; + /** + * Custom topics you want the model to detect within your input audio or text if present Submit up to 100 + */ + custom_topic?: string; + /** + * Sets how the model will interpret intents submitted to the custom_intent param. When strict, the model will only return intents submitted using the custom_intent param. When extended, the model will return its own detected intents in addition those submitted using the custom_intents param + */ + custom_intent_mode?: "extended" | "strict"; + /** + * Custom intents you want the model to detect within your input audio if present + */ + custom_intent?: string; + /** + * Identifies and extracts key entities from content in submitted audio + */ + detect_entities?: boolean; + /** + * Identifies the dominant language spoken in submitted audio + */ + detect_language?: boolean; + /** + * Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0 + */ + diarize?: boolean; + /** + * Identify and extract key entities from content in submitted audio + */ + dictation?: boolean; + /** + * Specify the expected encoding of your submitted audio + */ + encoding?: "linear16" | "flac" | "mulaw" | "amr-nb" | "amr-wb" | "opus" | "speex" | "g729"; + /** + * Arbitrary key-value pairs that are attached to the API response for usage in downstream processing + */ + extra?: string; + /** + * Filler Words can help transcribe interruptions in your audio, like 'uh' and 'um' + */ + filler_words?: boolean; + /** + * Key term prompting can boost or suppress specialized terminology and brands. + */ + keyterm?: string; + /** + * Keywords can boost or suppress specialized terminology and brands. + */ + keywords?: string; + /** + * The BCP-47 language tag that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available. 
+ */ + language?: string; + /** + * Spoken measurements will be converted to their corresponding abbreviations. + */ + measurements?: boolean; + /** + * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip. + */ + mip_opt_out?: boolean; + /** + * Mode of operation for the model representing broad area of topic that will be talked about in the supplied audio + */ + mode?: "general" | "medical" | "finance"; + /** + * Transcribe each audio channel independently. + */ + multichannel?: boolean; + /** + * Numerals converts numbers from written format to numerical format. + */ + numerals?: boolean; + /** + * Splits audio into paragraphs to improve transcript readability. + */ + paragraphs?: boolean; + /** + * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely. + */ + profanity_filter?: boolean; + /** + * Add punctuation and capitalization to the transcript. + */ + punctuate?: boolean; + /** + * Redaction removes sensitive information from your transcripts. + */ + redact?: string; + /** + * Search for terms or phrases in submitted audio and replaces them. + */ + replace?: string; + /** + * Search for terms or phrases in submitted audio. + */ + search?: string; + /** + * Recognizes the sentiment throughout a transcript or text. + */ + sentiment?: boolean; + /** + * Apply formatting to transcript output. When set to true, additional formatting will be applied to transcripts to improve readability. + */ + smart_format?: boolean; + /** + * Detect topics throughout a transcript or text. + */ + topics?: boolean; + /** + * Segments speech into meaningful semantic units. + */ + utterances?: boolean; + /** + * Seconds to wait before detecting a pause between words in submitted audio. 
+ */ + utt_split?: number; + /** + * The number of channels in the submitted audio + */ + channels?: number; + /** + * Specifies whether the streaming endpoint should provide ongoing transcription updates as more audio is received. When set to true, the endpoint sends continuous updates, meaning transcription results may evolve over time. Note: Supported only for webosockets. + */ + interim_results?: boolean; + /** + * Indicates how long model will wait to detect whether a speaker has finished speaking or pauses for a significant period of time. When set to a value, the streaming endpoint immediately finalizes the transcription for the processed time range and returns the transcript with a speech_final parameter set to true. Can also be set to false to disable endpointing + */ + endpointing?: string; + /** + * Indicates that speech has started. You'll begin receiving Speech Started messages upon speech starting. Note: Supported only for webosockets. + */ + vad_events?: boolean; + /** + * Indicates how long model will wait to send an UtteranceEnd message after a word has been transcribed. Use with interim_results. Note: Supported only for webosockets. 
+ */ + utterance_end_ms?: boolean; +} +interface Ai_Cf_Deepgram_Nova_3_Output { + results?: { + channels?: { + alternatives?: { + confidence?: number; + transcript?: string; + words?: { + confidence?: number; + end?: number; + start?: number; + word?: string; + }[]; + }[]; + }[]; + summary?: { + result?: string; + short?: string; + }; + sentiments?: { + segments?: { + text?: string; + start_word?: number; + end_word?: number; + sentiment?: string; + sentiment_score?: number; + }[]; + average?: { + sentiment?: string; + sentiment_score?: number; + }; + }; + }; +} +declare abstract class Base_Ai_Cf_Deepgram_Nova_3 { + inputs: Ai_Cf_Deepgram_Nova_3_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Nova_3_Output; +} +interface Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Input { + queries?: string | string[]; + /** + * Optional instruction for the task + */ + instruction?: string; + documents?: string | string[]; + text?: string | string[]; +} +interface Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Output { + data?: number[][]; + shape?: number[]; +} +declare abstract class Base_Ai_Cf_Qwen_Qwen3_Embedding_0_6B { + inputs: Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Output; +} +type Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input = { + /** + * readable stream with audio data and content-type specified for that data + */ + audio: { + body: object; + contentType: string; + }; + /** + * type of data PCM data that's sent to the inference server as raw array + */ + dtype?: "uint8" | "float32" | "float64"; +} | { + /** + * base64 encoded audio data + */ + audio: string; + /** + * type of data PCM data that's sent to the inference server as raw array + */ + dtype?: "uint8" | "float32" | "float64"; +}; +interface Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output { + /** + * if true, end-of-turn was detected + */ + is_complete?: boolean; + /** + * probability of the end-of-turn detection + */ + probability?: number; +} +declare abstract class Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2 { 
+ inputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input; + postProcessedOutputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output; +} +declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_120B { + inputs: XOR; + postProcessedOutputs: XOR; +} +declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_20B { + inputs: XOR; + postProcessedOutputs: XOR; +} +interface Ai_Cf_Leonardo_Phoenix_1_0_Input { + /** + * A text description of the image you want to generate. + */ + prompt: string; + /** + * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt + */ + guidance?: number; + /** + * Random seed for reproducibility of the image generation + */ + seed?: number; + /** + * The height of the generated image in pixels + */ + height?: number; + /** + * The width of the generated image in pixels + */ + width?: number; + /** + * The number of diffusion steps; higher values can improve quality but take longer + */ + num_steps?: number; + /** + * Specify what to exclude from the generated images + */ + negative_prompt?: string; +} +/** + * The generated image in JPEG format + */ +type Ai_Cf_Leonardo_Phoenix_1_0_Output = string; +declare abstract class Base_Ai_Cf_Leonardo_Phoenix_1_0 { + inputs: Ai_Cf_Leonardo_Phoenix_1_0_Input; + postProcessedOutputs: Ai_Cf_Leonardo_Phoenix_1_0_Output; +} +interface Ai_Cf_Leonardo_Lucid_Origin_Input { + /** + * A text description of the image you want to generate. 
+ */ + prompt: string; + /** + * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt + */ + guidance?: number; + /** + * Random seed for reproducibility of the image generation + */ + seed?: number; + /** + * The height of the generated image in pixels + */ + height?: number; + /** + * The width of the generated image in pixels + */ + width?: number; + /** + * The number of diffusion steps; higher values can improve quality but take longer + */ + num_steps?: number; + /** + * The number of diffusion steps; higher values can improve quality but take longer + */ + steps?: number; +} +interface Ai_Cf_Leonardo_Lucid_Origin_Output { + /** + * The generated image in Base64 format. + */ + image?: string; +} +declare abstract class Base_Ai_Cf_Leonardo_Lucid_Origin { + inputs: Ai_Cf_Leonardo_Lucid_Origin_Input; + postProcessedOutputs: Ai_Cf_Leonardo_Lucid_Origin_Output; +} +interface Ai_Cf_Deepgram_Aura_1_Input { + /** + * Speaker used to produce the audio. + */ + speaker?: "angus" | "asteria" | "arcas" | "orion" | "orpheus" | "athena" | "luna" | "zeus" | "perseus" | "helios" | "hera" | "stella"; + /** + * Encoding of the output audio. + */ + encoding?: "linear16" | "flac" | "mulaw" | "alaw" | "mp3" | "opus" | "aac"; + /** + * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type.. + */ + container?: "none" | "wav" | "ogg"; + /** + * The text content to be converted to speech + */ + text: string; + /** + * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable + */ + sample_rate?: number; + /** + * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type. 
+ */ + bit_rate?: number; +} +/** + * The generated audio in MP3 format + */ +type Ai_Cf_Deepgram_Aura_1_Output = string; +declare abstract class Base_Ai_Cf_Deepgram_Aura_1 { + inputs: Ai_Cf_Deepgram_Aura_1_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Aura_1_Output; +} +interface Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Input { + /** + * Input text to translate. Can be a single string or a list of strings. + */ + text: string | string[]; + /** + * Target langauge to translate to + */ + target_language: "asm_Beng" | "awa_Deva" | "ben_Beng" | "bho_Deva" | "brx_Deva" | "doi_Deva" | "eng_Latn" | "gom_Deva" | "gon_Deva" | "guj_Gujr" | "hin_Deva" | "hne_Deva" | "kan_Knda" | "kas_Arab" | "kas_Deva" | "kha_Latn" | "lus_Latn" | "mag_Deva" | "mai_Deva" | "mal_Mlym" | "mar_Deva" | "mni_Beng" | "mni_Mtei" | "npi_Deva" | "ory_Orya" | "pan_Guru" | "san_Deva" | "sat_Olck" | "snd_Arab" | "snd_Deva" | "tam_Taml" | "tel_Telu" | "urd_Arab" | "unr_Deva"; +} +interface Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Output { + /** + * Translated texts + */ + translations: string[]; +} +declare abstract class Base_Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B { + inputs: Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Input; + postProcessedOutputs: Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Output; +} +type Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Input = Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Async_Batch; +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. 
+ */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + content: string | { + /** + * Type of the content (text) + */ + type?: string; + /** + * Text content + */ + text?: string; + }[]; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. 
+ */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_1; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. 
+ */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_1 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Async_Batch { + requests: (Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt_1 | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages_1)[]; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt_1 { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_2; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. 
+ */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_2 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages_1 { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + content: string | { + /** + * Type of the content (text) + */ + type?: string; + /** + * Text content + */ + text?: string; + }[]; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ({ + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). 
+ */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + })[]; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_3; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_3 { + type?: "json_object" | "json_schema"; + json_schema?: unknown; +} +type Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Output = Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Chat_Completion_Response | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Text_Completion_Response | string | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_AsyncResponse; +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Chat_Completion_Response { + /** + * Unique identifier for the completion + */ + id?: string; + /** + * Object type identifier + */ + object?: "chat.completion"; + /** + * Unix timestamp of when the completion was created + */ + created?: number; + /** + * Model used for the completion + */ + model?: string; + /** + * List of completion choices + */ + choices?: { + /** + * Index of the choice in the list + */ + index?: number; + /** + * The message generated by the model + */ + message?: { + /** + * Role of the message author + */ + role: string; + /** + * The content of the message + */ + content: string; + /** + * Internal reasoning content (if available) + */ + reasoning_content?: string; + /** + * Tool calls made by the assistant + */ + tool_calls?: { + /** + * Unique identifier for the tool call + */ + id: string; + /** + * Type of tool call + */ + type: "function"; + function: { + /** + * Name of the function to call + */ + name: string; + /** + * JSON string of arguments 
for the function + */ + arguments: string; + }; + }[]; + }; + /** + * Reason why the model stopped generating + */ + finish_reason?: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + }[]; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * Log probabilities for the prompt (if requested) + */ + prompt_logprobs?: {} | null; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Text_Completion_Response { + /** + * Unique identifier for the completion + */ + id?: string; + /** + * Object type identifier + */ + object?: "text_completion"; + /** + * Unix timestamp of when the completion was created + */ + created?: number; + /** + * Model used for the completion + */ + model?: string; + /** + * List of completion choices + */ + choices?: { + /** + * Index of the choice in the list + */ + index: number; + /** + * The generated text completion + */ + text: string; + /** + * Reason why the model stopped generating + */ + finish_reason: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + /** + * Log probabilities for the prompt (if requested) + */ + prompt_logprobs?: {} | null; + }[]; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_AsyncResponse { + /** + * The async request id that can be used to 
obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It { + inputs: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Input; + postProcessedOutputs: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Output; +} +interface Ai_Cf_Pfnet_Plamo_Embedding_1B_Input { + /** + * Input text to embed. Can be a single string or a list of strings. + */ + text: string | string[]; +} +interface Ai_Cf_Pfnet_Plamo_Embedding_1B_Output { + /** + * Embedding vectors, where each vector is a list of floats. + */ + data: number[][]; + /** + * Shape of the embedding data as [number_of_embeddings, embedding_dimension]. + * + * @minItems 2 + * @maxItems 2 + */ + shape: [ + number, + number + ]; +} +declare abstract class Base_Ai_Cf_Pfnet_Plamo_Embedding_1B { + inputs: Ai_Cf_Pfnet_Plamo_Embedding_1B_Input; + postProcessedOutputs: Ai_Cf_Pfnet_Plamo_Embedding_1B_Output; +} +interface Ai_Cf_Deepgram_Flux_Input { + /** + * Encoding of the audio stream. Currently only supports raw signed little-endian 16-bit PCM. + */ + encoding: "linear16"; + /** + * Sample rate of the audio stream in Hz. + */ + sample_rate: string; + /** + * End-of-turn confidence required to fire an eager end-of-turn event. When set, enables EagerEndOfTurn and TurnResumed events. Valid Values 0.3 - 0.9. + */ + eager_eot_threshold?: string; + /** + * End-of-turn confidence required to finish a turn. Valid Values 0.5 - 0.9. + */ + eot_threshold?: string; + /** + * A turn will be finished when this much time has passed after speech, regardless of EOT confidence. + */ + eot_timeout_ms?: string; + /** + * Keyterm prompting can improve recognition of specialized terminology. Pass multiple keyterm query parameters to boost multiple keyterms. + */ + keyterm?: string; + /** + * Opts out requests from the Deepgram Model Improvement Program. Refer to Deepgram Docs for pricing impacts before setting this to true. 
https://dpgr.am/deepgram-mip + */ + mip_opt_out?: "true" | "false"; + /** + * Label your requests for the purpose of identification during usage reporting + */ + tag?: string; +} +/** + * Output will be returned as websocket messages. + */ +interface Ai_Cf_Deepgram_Flux_Output { + /** + * The unique identifier of the request (uuid) + */ + request_id?: string; + /** + * Starts at 0 and increments for each message the server sends to the client. + */ + sequence_id?: number; + /** + * The type of event being reported. + */ + event?: "Update" | "StartOfTurn" | "EagerEndOfTurn" | "TurnResumed" | "EndOfTurn"; + /** + * The index of the current turn + */ + turn_index?: number; + /** + * Start time in seconds of the audio range that was transcribed + */ + audio_window_start?: number; + /** + * End time in seconds of the audio range that was transcribed + */ + audio_window_end?: number; + /** + * Text that was said over the course of the current turn + */ + transcript?: string; + /** + * The words in the transcript + */ + words?: { + /** + * The individual punctuated, properly-cased word from the transcript + */ + word: string; + /** + * Confidence that this word was transcribed correctly + */ + confidence: number; + }[]; + /** + * Confidence that no more speech is coming in this turn + */ + end_of_turn_confidence?: number; +} +declare abstract class Base_Ai_Cf_Deepgram_Flux { + inputs: Ai_Cf_Deepgram_Flux_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Flux_Output; +} +interface Ai_Cf_Deepgram_Aura_2_En_Input { + /** + * Speaker used to produce the audio. 
+ */ + speaker?: "amalthea" | "andromeda" | "apollo" | "arcas" | "aries" | "asteria" | "athena" | "atlas" | "aurora" | "callista" | "cora" | "cordelia" | "delia" | "draco" | "electra" | "harmonia" | "helena" | "hera" | "hermes" | "hyperion" | "iris" | "janus" | "juno" | "jupiter" | "luna" | "mars" | "minerva" | "neptune" | "odysseus" | "ophelia" | "orion" | "orpheus" | "pandora" | "phoebe" | "pluto" | "saturn" | "thalia" | "theia" | "vesta" | "zeus"; + /** + * Encoding of the output audio. + */ + encoding?: "linear16" | "flac" | "mulaw" | "alaw" | "mp3" | "opus" | "aac"; + /** + * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type.. + */ + container?: "none" | "wav" | "ogg"; + /** + * The text content to be converted to speech + */ + text: string; + /** + * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable + */ + sample_rate?: number; + /** + * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type. + */ + bit_rate?: number; +} +/** + * The generated audio in MP3 format + */ +type Ai_Cf_Deepgram_Aura_2_En_Output = string; +declare abstract class Base_Ai_Cf_Deepgram_Aura_2_En { + inputs: Ai_Cf_Deepgram_Aura_2_En_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Aura_2_En_Output; +} +interface Ai_Cf_Deepgram_Aura_2_Es_Input { + /** + * Speaker used to produce the audio. + */ + speaker?: "sirio" | "nestor" | "carina" | "celeste" | "alvaro" | "diana" | "aquila" | "selena" | "estrella" | "javier"; + /** + * Encoding of the output audio. + */ + encoding?: "linear16" | "flac" | "mulaw" | "alaw" | "mp3" | "opus" | "aac"; + /** + * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type.. 
+ */ + container?: "none" | "wav" | "ogg"; + /** + * The text content to be converted to speech + */ + text: string; + /** + * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable + */ + sample_rate?: number; + /** + * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type. + */ + bit_rate?: number; +} +/** + * The generated audio in MP3 format + */ +type Ai_Cf_Deepgram_Aura_2_Es_Output = string; +declare abstract class Base_Ai_Cf_Deepgram_Aura_2_Es { + inputs: Ai_Cf_Deepgram_Aura_2_Es_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Aura_2_Es_Output; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Dev_Input { + multipart: { + body?: object; + contentType?: string; + }; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Dev_Output { + /** + * Generated image as Base64 string. + */ + image?: string; +} +declare abstract class Base_Ai_Cf_Black_Forest_Labs_Flux_2_Dev { + inputs: Ai_Cf_Black_Forest_Labs_Flux_2_Dev_Input; + postProcessedOutputs: Ai_Cf_Black_Forest_Labs_Flux_2_Dev_Output; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B_Input { + multipart: { + body?: object; + contentType?: string; + }; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B_Output { + /** + * Generated image as Base64 string. + */ + image?: string; +} +declare abstract class Base_Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B { + inputs: Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B_Input; + postProcessedOutputs: Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B_Output; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B_Input { + multipart: { + body?: object; + contentType?: string; + }; +} +interface Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B_Output { + /** + * Generated image as Base64 string. 
+ */ + image?: string; +} +declare abstract class Base_Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B { + inputs: Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B_Input; + postProcessedOutputs: Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B_Output; +} +declare abstract class Base_Ai_Cf_Zai_Org_Glm_4_7_Flash { + inputs: ChatCompletionsInput; + postProcessedOutputs: ChatCompletionsOutput; +} +declare abstract class Base_Ai_Cf_Moonshotai_Kimi_K2_5 { + inputs: ChatCompletionsInput; + postProcessedOutputs: ChatCompletionsOutput; +} +declare abstract class Base_Ai_Cf_Nvidia_Nemotron_3_120B_A12B { + inputs: ChatCompletionsInput; + postProcessedOutputs: ChatCompletionsOutput; +} +declare abstract class Base_Ai_Cf_Google_Gemma_4_26B_A4B_IT { + inputs: ChatCompletionsInput; + postProcessedOutputs: ChatCompletionsOutput; +} +interface AiModels { + "@cf/huggingface/distilbert-sst-2-int8": BaseAiTextClassification; + "@cf/stabilityai/stable-diffusion-xl-base-1.0": BaseAiTextToImage; + "@cf/runwayml/stable-diffusion-v1-5-inpainting": BaseAiTextToImage; + "@cf/runwayml/stable-diffusion-v1-5-img2img": BaseAiTextToImage; + "@cf/lykon/dreamshaper-8-lcm": BaseAiTextToImage; + "@cf/bytedance/stable-diffusion-xl-lightning": BaseAiTextToImage; + "@cf/myshell-ai/melotts": BaseAiTextToSpeech; + "@cf/google/embeddinggemma-300m": BaseAiTextEmbeddings; + "@cf/microsoft/resnet-50": BaseAiImageClassification; + "@cf/meta/llama-2-7b-chat-int8": BaseAiTextGeneration; + "@cf/mistral/mistral-7b-instruct-v0.1": BaseAiTextGeneration; + "@cf/meta/llama-2-7b-chat-fp16": BaseAiTextGeneration; + "@hf/thebloke/llama-2-13b-chat-awq": BaseAiTextGeneration; + "@hf/thebloke/mistral-7b-instruct-v0.1-awq": BaseAiTextGeneration; + "@hf/thebloke/zephyr-7b-beta-awq": BaseAiTextGeneration; + "@hf/thebloke/openhermes-2.5-mistral-7b-awq": BaseAiTextGeneration; + "@hf/thebloke/neural-chat-7b-v3-1-awq": BaseAiTextGeneration; + "@hf/thebloke/deepseek-coder-6.7b-base-awq": BaseAiTextGeneration; + 
"@hf/thebloke/deepseek-coder-6.7b-instruct-awq": BaseAiTextGeneration; + "@cf/deepseek-ai/deepseek-math-7b-instruct": BaseAiTextGeneration; + "@cf/defog/sqlcoder-7b-2": BaseAiTextGeneration; + "@cf/openchat/openchat-3.5-0106": BaseAiTextGeneration; + "@cf/tiiuae/falcon-7b-instruct": BaseAiTextGeneration; + "@cf/thebloke/discolm-german-7b-v1-awq": BaseAiTextGeneration; + "@cf/qwen/qwen1.5-0.5b-chat": BaseAiTextGeneration; + "@cf/qwen/qwen1.5-7b-chat-awq": BaseAiTextGeneration; + "@cf/qwen/qwen1.5-14b-chat-awq": BaseAiTextGeneration; + "@cf/tinyllama/tinyllama-1.1b-chat-v1.0": BaseAiTextGeneration; + "@cf/microsoft/phi-2": BaseAiTextGeneration; + "@cf/qwen/qwen1.5-1.8b-chat": BaseAiTextGeneration; + "@cf/mistral/mistral-7b-instruct-v0.2-lora": BaseAiTextGeneration; + "@hf/nousresearch/hermes-2-pro-mistral-7b": BaseAiTextGeneration; + "@hf/nexusflow/starling-lm-7b-beta": BaseAiTextGeneration; + "@hf/google/gemma-7b-it": BaseAiTextGeneration; + "@cf/meta-llama/llama-2-7b-chat-hf-lora": BaseAiTextGeneration; + "@cf/google/gemma-2b-it-lora": BaseAiTextGeneration; + "@cf/google/gemma-7b-it-lora": BaseAiTextGeneration; + "@hf/mistral/mistral-7b-instruct-v0.2": BaseAiTextGeneration; + "@cf/meta/llama-3-8b-instruct": BaseAiTextGeneration; + "@cf/fblgit/una-cybertron-7b-v2-bf16": BaseAiTextGeneration; + "@cf/meta/llama-3-8b-instruct-awq": BaseAiTextGeneration; + "@cf/meta/llama-3.1-8b-instruct-fp8": BaseAiTextGeneration; + "@cf/meta/llama-3.1-8b-instruct-awq": BaseAiTextGeneration; + "@cf/meta/llama-3.2-3b-instruct": BaseAiTextGeneration; + "@cf/meta/llama-3.2-1b-instruct": BaseAiTextGeneration; + "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b": BaseAiTextGeneration; + "@cf/ibm-granite/granite-4.0-h-micro": BaseAiTextGeneration; + "@cf/facebook/bart-large-cnn": BaseAiSummarization; + "@cf/llava-hf/llava-1.5-7b-hf": BaseAiImageToText; + "@cf/baai/bge-base-en-v1.5": Base_Ai_Cf_Baai_Bge_Base_En_V1_5; + "@cf/openai/whisper": Base_Ai_Cf_Openai_Whisper; + "@cf/meta/m2m100-1.2b": 
Base_Ai_Cf_Meta_M2M100_1_2B; + "@cf/baai/bge-small-en-v1.5": Base_Ai_Cf_Baai_Bge_Small_En_V1_5; + "@cf/baai/bge-large-en-v1.5": Base_Ai_Cf_Baai_Bge_Large_En_V1_5; + "@cf/unum/uform-gen2-qwen-500m": Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M; + "@cf/openai/whisper-tiny-en": Base_Ai_Cf_Openai_Whisper_Tiny_En; + "@cf/openai/whisper-large-v3-turbo": Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo; + "@cf/baai/bge-m3": Base_Ai_Cf_Baai_Bge_M3; + "@cf/black-forest-labs/flux-1-schnell": Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell; + "@cf/meta/llama-3.2-11b-vision-instruct": Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct; + "@cf/meta/llama-3.3-70b-instruct-fp8-fast": Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast; + "@cf/meta/llama-guard-3-8b": Base_Ai_Cf_Meta_Llama_Guard_3_8B; + "@cf/baai/bge-reranker-base": Base_Ai_Cf_Baai_Bge_Reranker_Base; + "@cf/qwen/qwen2.5-coder-32b-instruct": Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct; + "@cf/qwen/qwq-32b": Base_Ai_Cf_Qwen_Qwq_32B; + "@cf/mistralai/mistral-small-3.1-24b-instruct": Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct; + "@cf/google/gemma-3-12b-it": Base_Ai_Cf_Google_Gemma_3_12B_It; + "@cf/meta/llama-4-scout-17b-16e-instruct": Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct; + "@cf/qwen/qwen3-30b-a3b-fp8": Base_Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8; + "@cf/deepgram/nova-3": Base_Ai_Cf_Deepgram_Nova_3; + "@cf/qwen/qwen3-embedding-0.6b": Base_Ai_Cf_Qwen_Qwen3_Embedding_0_6B; + "@cf/pipecat-ai/smart-turn-v2": Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2; + "@cf/openai/gpt-oss-120b": Base_Ai_Cf_Openai_Gpt_Oss_120B; + "@cf/openai/gpt-oss-20b": Base_Ai_Cf_Openai_Gpt_Oss_20B; + "@cf/leonardo/phoenix-1.0": Base_Ai_Cf_Leonardo_Phoenix_1_0; + "@cf/leonardo/lucid-origin": Base_Ai_Cf_Leonardo_Lucid_Origin; + "@cf/deepgram/aura-1": Base_Ai_Cf_Deepgram_Aura_1; + "@cf/ai4bharat/indictrans2-en-indic-1B": Base_Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B; + "@cf/aisingapore/gemma-sea-lion-v4-27b-it": Base_Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It; + 
"@cf/pfnet/plamo-embedding-1b": Base_Ai_Cf_Pfnet_Plamo_Embedding_1B; + "@cf/deepgram/flux": Base_Ai_Cf_Deepgram_Flux; + "@cf/deepgram/aura-2-en": Base_Ai_Cf_Deepgram_Aura_2_En; + "@cf/deepgram/aura-2-es": Base_Ai_Cf_Deepgram_Aura_2_Es; + "@cf/black-forest-labs/flux-2-dev": Base_Ai_Cf_Black_Forest_Labs_Flux_2_Dev; + "@cf/black-forest-labs/flux-2-klein-4b": Base_Ai_Cf_Black_Forest_Labs_Flux_2_Klein_4B; + "@cf/black-forest-labs/flux-2-klein-9b": Base_Ai_Cf_Black_Forest_Labs_Flux_2_Klein_9B; + "@cf/zai-org/glm-4.7-flash": Base_Ai_Cf_Zai_Org_Glm_4_7_Flash; + "@cf/moonshotai/kimi-k2.5": Base_Ai_Cf_Moonshotai_Kimi_K2_5; + "@cf/nvidia/nemotron-3-120b-a12b": Base_Ai_Cf_Nvidia_Nemotron_3_120B_A12B; +} +type AiOptions = { + /** + * Send requests as an asynchronous batch job, only works for supported models + * https://developers.cloudflare.com/workers-ai/features/batch-api + */ + queueRequest?: boolean; + /** + * Establish websocket connections, only works for supported models + */ + websocket?: boolean; + /** + * Tag your requests to group and view them in Cloudflare dashboard. + * + * Rules: + * Tags must only contain letters, numbers, and the symbols: : - . / @ + * Each tag can have maximum 50 characters. + * Maximum 5 tags are allowed each request. + * Duplicate tags will removed. 
+ */ + tags?: string[]; + gateway?: GatewayOptions; + returnRawResponse?: boolean; + prefix?: string; + extraHeaders?: object; + signal?: AbortSignal; +}; +type AiModelsSearchParams = { + author?: string; + hide_experimental?: boolean; + page?: number; + per_page?: number; + search?: string; + source?: number; + task?: string; +}; +type AiModelsSearchObject = { + id: string; + source: number; + name: string; + description: string; + task: { + id: string; + name: string; + description: string; + }; + tags: string[]; + properties: { + property_id: string; + value: string; + }[]; +}; +type ChatCompletionsBase = XOR; +type ChatCompletionsInput = XOR; +interface InferenceUpstreamError extends Error { +} +interface AiInternalError extends Error { +} +type AiModelListType = Record; +type AiAsyncBatchResponse = { + request_id: string; +}; +declare abstract class Ai { + aiGatewayLogId: string | null; + gateway(gatewayId: string): AiGateway; + /** + * @deprecated Use the standalone `ai_search_namespaces` or `ai_search` Workers bindings instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ + aiSearch(): AiSearchNamespace; + /** + * @deprecated AutoRAG has been replaced by AI Search. + * Use the standalone `ai_search_namespaces` or `ai_search` Workers bindings instead. 
+ * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + * + * @param autoragId Instance ID + */ + autorag(autoragId: string): AutoRAG; + // Batch request + run(model: Name, inputs: { + requests: AiModelList[Name]['inputs'][]; + }, options: AiOptions & { + queueRequest: true; + }): Promise; + // Raw response + run(model: Name, inputs: AiModelList[Name]['inputs'], options: AiOptions & { + returnRawResponse: true; + }): Promise; + // WebSocket + run(model: Name, inputs: AiModelList[Name]['inputs'], options: AiOptions & { + websocket: true; + }): Promise; + // Streaming + run(model: Name, inputs: AiModelList[Name]['inputs'] & { + stream: true; + }, options?: AiOptions): Promise; + // Normal (default) - known model + run(model: Name, inputs: AiModelList[Name]['inputs'], options?: AiOptions): Promise; + // Unknown model (gateway fallback) + run(model: string & {}, inputs: Record, options?: AiOptions): Promise>; + models(params?: AiModelsSearchParams): Promise; + toMarkdown(): ToMarkdownService; + toMarkdown(files: MarkdownDocument[], options?: ConversionRequestOptions): Promise; + toMarkdown(files: MarkdownDocument, options?: ConversionRequestOptions): Promise; +} +type GatewayRetries = { + maxAttempts?: 1 | 2 | 3 | 4 | 5; + retryDelayMs?: number; + backoff?: 'constant' | 'linear' | 'exponential'; +}; +type GatewayOptions = { + id: string; + cacheKey?: string; + cacheTtl?: number; + skipCache?: boolean; + metadata?: Record; + collectLog?: boolean; + eventId?: string; + requestTimeoutMs?: number; + retries?: GatewayRetries; +}; +type UniversalGatewayOptions = Exclude & { + /** + ** @deprecated + */ + id?: string; +}; +type AiGatewayPatchLog = { + score?: number | null; + feedback?: -1 | 1 | null; + metadata?: Record | null; +}; +type AiGatewayLog = { + id: string; + provider: string; + model: string; + model_type?: string; + path: string; + duration: number; + request_type?: string; + request_content_type?: string; + status_code: number; + 
response_content_type?: string; + success: boolean; + cached: boolean; + tokens_in?: number; + tokens_out?: number; + metadata?: Record; + step?: number; + cost?: number; + custom_cost?: boolean; + request_size: number; + request_head?: string; + request_head_complete: boolean; + response_size: number; + response_head?: string; + response_head_complete: boolean; + created_at: Date; +}; +type AIGatewayProviders = 'workers-ai' | 'anthropic' | 'aws-bedrock' | 'azure-openai' | 'google-vertex-ai' | 'huggingface' | 'openai' | 'perplexity-ai' | 'replicate' | 'groq' | 'cohere' | 'google-ai-studio' | 'mistral' | 'grok' | 'openrouter' | 'deepseek' | 'cerebras' | 'cartesia' | 'elevenlabs' | 'adobe-firefly'; +type AIGatewayHeaders = { + 'cf-aig-metadata': Record | string; + 'cf-aig-custom-cost': { + per_token_in?: number; + per_token_out?: number; + } | { + total_cost?: number; + } | string; + 'cf-aig-cache-ttl': number | string; + 'cf-aig-skip-cache': boolean | string; + 'cf-aig-cache-key': string; + 'cf-aig-event-id': string; + 'cf-aig-request-timeout': number | string; + 'cf-aig-max-attempts': number | string; + 'cf-aig-retry-delay': number | string; + 'cf-aig-backoff': string; + 'cf-aig-collect-log': boolean | string; + Authorization: string; + 'Content-Type': string; + [key: string]: string | number | boolean | object; +}; +type AIGatewayUniversalRequest = { + provider: AIGatewayProviders | string; // eslint-disable-line + endpoint: string; + headers: Partial; + query: unknown; +}; +interface AiGatewayInternalError extends Error { +} +interface AiGatewayLogNotFound extends Error { +} +declare abstract class AiGateway { + patchLog(logId: string, data: AiGatewayPatchLog): Promise; + getLog(logId: string): Promise; + run(data: AIGatewayUniversalRequest | AIGatewayUniversalRequest[], options?: { + gateway?: UniversalGatewayOptions; + extraHeaders?: object; + signal?: AbortSignal; + }): Promise; + getUrl(provider?: AIGatewayProviders | string): Promise; // eslint-disable-line 
+} +// Copyright (c) 2022-2025 Cloudflare, Inc. +// Licensed under the Apache 2.0 license found in the LICENSE file or at: +// https://opensource.org/licenses/Apache-2.0 +/** + * Artifacts — Git-compatible file storage on Cloudflare Workers. + * + * Provides programmatic access to create, manage, and fork repositories, + * and to issue and revoke scoped access tokens. + */ +/** Information about a repository. */ +interface ArtifactsRepoInfo { + /** Unique repository ID. */ + id: string; + /** Repository name. */ + name: string; + /** Repository description, or null if not set. */ + description: string | null; + /** Default branch name (e.g. "main"). */ + defaultBranch: string; + /** ISO 8601 creation timestamp. */ + createdAt: string; + /** ISO 8601 last-updated timestamp. */ + updatedAt: string; + /** ISO 8601 timestamp of the last push, or null if never pushed. */ + lastPushAt: string | null; + /** Fork source (e.g. "github:owner/repo", "artifacts:namespace/repo"), or null if not a fork. */ + source: string | null; + /** Whether the repository is read-only. */ + readOnly: boolean; + /** HTTPS git remote URL. */ + remote: string; +} +/** Result of creating a repository — includes the initial access token. */ +interface ArtifactsCreateRepoResult { + /** Unique repository ID. */ + id: string; + /** Repository name. */ + name: string; + /** Repository description, or null if not set. */ + description: string | null; + /** Default branch name. */ + defaultBranch: string; + /** HTTPS git remote URL. */ + remote: string; + /** Plaintext access token (only returned at creation time). */ + token: string; + /** ISO 8601 token expiry timestamp. */ + tokenExpiresAt: string; +} +/** Paginated list of repositories. */ +interface ArtifactsRepoListResult { + /** Repositories in this page (without the `remote` field). */ + repos: Omit[]; + /** Total number of repositories in the namespace. */ + total: number; + /** Cursor for the next page, if there are more results. 
*/ + cursor?: string; +} +/** Result of creating an access token. */ +interface ArtifactsCreateTokenResult { + /** Unique token ID. */ + id: string; + /** Plaintext token (only returned at creation time). */ + plaintext: string; + /** Token scope: "read" or "write". */ + scope: 'read' | 'write'; + /** ISO 8601 token expiry timestamp. */ + expiresAt: string; +} +/** Token metadata (no plaintext). */ +interface ArtifactsTokenInfo { + /** Unique token ID. */ + id: string; + /** Token scope: "read" or "write". */ + scope: 'read' | 'write'; + /** Token state: "active", "expired", or "revoked". */ + state: 'active' | 'expired' | 'revoked'; + /** ISO 8601 creation timestamp. */ + createdAt: string; + /** ISO 8601 expiry timestamp. */ + expiresAt: string; +} +/** Paginated list of tokens for a repository. */ +interface ArtifactsTokenListResult { + /** Tokens in this page. */ + tokens: ArtifactsTokenInfo[]; + /** Total number of tokens for the repository. */ + total: number; +} +/** Handle for a single repository. Returned by Artifacts.get(). */ +interface ArtifactsRepo extends ArtifactsRepoInfo { + /** + * Create an access token for this repo. + * @param scope Token scope: "write" (default) or "read". + * @param ttl Time-to-live in seconds (default 86400, min 60, max 31536000). + */ + createToken(scope?: 'write' | 'read', ttl?: number): Promise; + /** List tokens for this repo (metadata only, no plaintext). */ + listTokens(): Promise; + /** + * Revoke a token by plaintext or ID. + * @param tokenOrId Plaintext token or token ID. + * @returns true if revoked, false if not found. + */ + revokeToken(tokenOrId: string): Promise; + // ── Fork ── + /** + * Fork this repo to a new repo. + * @param name Target repository name. + * @param opts Optional: description, readOnly flag, defaultBranchOnly (default true). 
+ */ + fork(name: string, opts?: { + description?: string; + readOnly?: boolean; + defaultBranchOnly?: boolean; + }): Promise; +} +/** Artifacts binding — namespace-level operations. */ +interface Artifacts { + /** + * Create a new repository with an initial access token. + * @param name Repository name (alphanumeric, dots, hyphens, underscores). + * @param opts Optional: readOnly flag, description, default branch name. + * @returns Repo metadata with initial token. + */ + create(name: string, opts?: { + readOnly?: boolean; + description?: string; + setDefaultBranch?: string; + }): Promise; + /** + * Get a handle to an existing repository. + * @param name Repository name. + * @returns Repo handle. + */ + get(name: string): Promise; + /** + * Import a repository from an external git remote. + * @param params Source URL and optional branch/depth, plus target name and options. + * @returns Repo metadata with initial token. + */ + import(params: { + source: { + url: string; + branch?: string; + depth?: number; + }; + target: { + name: string; + opts?: { + description?: string; + readOnly?: boolean; + }; + }; + }): Promise; + /** + * List repositories with cursor-based pagination. + * @param opts Optional: limit (1–200, default 50), cursor for next page. + */ + list(opts?: { + limit?: number; + cursor?: string; + }): Promise; + /** + * Delete a repository and all associated tokens. + * @param name Repository name. + * @returns true if deleted, false if not found. + */ + delete(name: string): Promise; +} +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ +interface AutoRAGInternalError extends Error { +} +/** + * @deprecated Use the standalone AI Search Workers binding instead. 
+ * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ +interface AutoRAGNotFoundError extends Error { +} +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ +interface AutoRAGUnauthorizedError extends Error { +} +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ +interface AutoRAGNameNotSetError extends Error { +} +type ComparisonFilter = { + key: string; + type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte'; + value: string | number | boolean; +}; +type CompoundFilter = { + type: 'and' | 'or'; + filters: ComparisonFilter[]; +}; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ +type AutoRagSearchRequest = { + query: string; + filters?: CompoundFilter | ComparisonFilter; + max_num_results?: number; + ranking_options?: { + ranker?: string; + score_threshold?: number; + }; + reranking?: { + enabled?: boolean; + model?: string; + }; + rewrite_query?: boolean; +}; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ +type AutoRagAiSearchRequest = AutoRagSearchRequest & { + stream?: boolean; + system_prompt?: string; +}; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ +type AutoRagAiSearchRequestStreaming = Omit & { + stream: true; +}; +/** + * @deprecated Use the standalone AI Search Workers binding instead. 
+ * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ +type AutoRagSearchResponse = { + object: 'vector_store.search_results.page'; + search_query: string; + data: { + file_id: string; + filename: string; + score: number; + attributes: Record; + content: { + type: 'text'; + text: string; + }[]; + }[]; + has_more: boolean; + next_page: string | null; +}; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ +type AutoRagListResponse = { + id: string; + enable: boolean; + type: string; + source: string; + vectorize_name: string; + paused: boolean; + status: string; +}[]; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ +type AutoRagAiSearchResponse = AutoRagSearchResponse & { + response: string; +}; +/** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ +declare abstract class AutoRAG { + /** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ + list(): Promise; + /** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ + search(params: AutoRagSearchRequest): Promise; + /** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ + aiSearch(params: AutoRagAiSearchRequestStreaming): Promise; + /** + * @deprecated Use the standalone AI Search Workers binding instead. 
+ * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ + aiSearch(params: AutoRagAiSearchRequest): Promise; + /** + * @deprecated Use the standalone AI Search Workers binding instead. + * See https://developers.cloudflare.com/ai-search/usage/workers-binding/ + */ + aiSearch(params: AutoRagAiSearchRequest): Promise; +} +interface BasicImageTransformations { + /** + * Maximum width in image pixels. The value must be an integer. + */ + width?: number; + /** + * Maximum height in image pixels. The value must be an integer. + */ + height?: number; + /** + * Resizing mode as a string. It affects interpretation of width and height + * options: + * - scale-down: Similar to contain, but the image is never enlarged. If + * the image is larger than given width or height, it will be resized. + * Otherwise its original size will be kept. + * - contain: Resizes to maximum size that fits within the given width and + * height. If only a single dimension is given (e.g. only width), the + * image will be shrunk or enlarged to exactly match that dimension. + * Aspect ratio is always preserved. + * - cover: Resizes (shrinks or enlarges) to fill the entire area of width + * and height. If the image has an aspect ratio different from the ratio + * of width and height, it will be cropped to fit. + * - crop: The image will be shrunk and cropped to fit within the area + * specified by width and height. The image will not be enlarged. For images + * smaller than the given dimensions it's the same as scale-down. For + * images larger than the given dimensions, it's the same as cover. + * See also trim. + * - pad: Resizes to the maximum size that fits within the given width and + * height, and then fills the remaining area with a background color + * (white by default). Use of this mode is not recommended, as the same + * effect can be more efficiently achieved with the contain mode and the + * CSS object-fit: contain property. 
+ * - squeeze: Stretches and deforms to the width and height given, even if it + * breaks aspect ratio + */ + fit?: "scale-down" | "contain" | "cover" | "crop" | "pad" | "squeeze"; + /** + * Image segmentation using artificial intelligence models. Sets pixels not + * within selected segment area to transparent e.g "foreground" sets every + * background pixel as transparent. + */ + segment?: "foreground"; + /** + * When cropping with fit: "cover", this defines the side or point that should + * be left uncropped. The value is either a string + * "left", "right", "top", "bottom", "auto", or "center" (the default), + * or an object {x, y} containing focal point coordinates in the original + * image expressed as fractions ranging from 0.0 (top or left) to 1.0 + * (bottom or right), 0.5 being the center. {fit: "cover", gravity: "top"} will + * crop bottom or left and right sides as necessary, but won’t crop anything + * from the top. {fit: "cover", gravity: {x:0.5, y:0.2}} will crop each side to + * preserve as much as possible around a point at 20% of the height of the + * source image. + */ + gravity?: 'face' | 'left' | 'right' | 'top' | 'bottom' | 'center' | 'auto' | 'entropy' | BasicImageTransformationsGravityCoordinates; + /** + * Background color to add underneath the image. Applies only to images with + * transparency (such as PNG). Accepts any CSS color (#RRGGBB, rgba(…), + * hsl(…), etc.) + */ + background?: string; + /** + * Number of degrees (90, 180, 270) to rotate the image by. width and height + * options refer to axes after rotation. + */ + rotate?: 0 | 90 | 180 | 270 | 360; +} +interface BasicImageTransformationsGravityCoordinates { + x?: number; + y?: number; + mode?: 'remainder' | 'box-center'; +} +/** + * In addition to the properties you can set in the RequestInit dict + * that you pass as an argument to the Request constructor, you can + * set certain properties of a `cf` object to control how Cloudflare + * features are applied to that new Request. 
+ * + * Note: Currently, these properties cannot be tested in the + * playground. + */ +interface RequestInitCfProperties extends Record { + cacheEverything?: boolean; + /** + * A request's cache key is what determines if two requests are + * "the same" for caching purposes. If a request has the same cache key + * as some previous request, then we can serve the same cached response for + * both. (e.g. 'some-key') + * + * Only available for Enterprise customers. + */ + cacheKey?: string; + /** + * This allows you to append additional Cache-Tag response headers + * to the origin response without modifications to the origin server. + * This will allow for greater control over the Purge by Cache Tag feature + * utilizing changes only in the Workers process. + * + * Only available for Enterprise customers. + */ + cacheTags?: string[]; + /** + * Force response to be cached for a given number of seconds. (e.g. 300) + */ + cacheTtl?: number; + /** + * Force response to be cached for a given number of seconds based on the Origin status code. + * (e.g. { '200-299': 86400, '404': 1, '500-599': 0 }) + */ + cacheTtlByStatus?: Record; + /** + * Explicit Cache-Control header value to set on the response stored in cache. + * This gives full control over cache directives (e.g. 'public, max-age=3600, s-maxage=86400'). + * + * Cannot be used together with `cacheTtl` or the `cache` request option (`no-store`/`no-cache`), + * as these are mutually exclusive cache control mechanisms. Setting both will throw a TypeError. + * + * Can be used together with `cacheTtlByStatus`. + */ + cacheControl?: string; + /** + * Whether the response should be eligible for Cache Reserve storage. + */ + cacheReserveEligible?: boolean; + /** + * Whether to respect strong ETags (as opposed to weak ETags) from the origin. + */ + respectStrongEtag?: boolean; + /** + * Whether to strip ETag headers from the origin response before caching. 
+ */ + stripEtags?: boolean; + /** + * Whether to strip Last-Modified headers from the origin response before caching. + */ + stripLastModified?: boolean; + /** + * Whether to enable Cache Deception Armor, which protects against web cache + * deception attacks by verifying the Content-Type matches the URL extension. + */ + cacheDeceptionArmor?: boolean; + /** + * Minimum file size in bytes for a response to be eligible for Cache Reserve storage. + */ + cacheReserveMinimumFileSize?: number; + scrapeShield?: boolean; + apps?: boolean; + image?: RequestInitCfPropertiesImage; + minify?: RequestInitCfPropertiesImageMinify; + mirage?: boolean; + polish?: "lossy" | "lossless" | "off"; + r2?: RequestInitCfPropertiesR2; + /** + * Redirects the request to an alternate origin server. You can use this, + * for example, to implement load balancing across several origins. + * (e.g.us-east.example.com) + * + * Note - For security reasons, the hostname set in resolveOverride must + * be proxied on the same Cloudflare zone of the incoming request. + * Otherwise, the setting is ignored. CNAME hosts are allowed, so to + * resolve to a host under a different domain or a DNS only domain first + * declare a CNAME record within your own zone’s DNS mapping to the + * external hostname, set proxy on Cloudflare, then set resolveOverride + * to point to that CNAME record. + */ + resolveOverride?: string; +} +interface RequestInitCfPropertiesImageDraw extends BasicImageTransformations { + /** + * Absolute URL of the image file to use for the drawing. It can be any of + * the supported file formats. For drawing of watermarks or non-rectangular + * overlays we recommend using PNG or WebP images. + */ + url: string; + /** + * Floating-point number between 0 (transparent) and 1 (opaque). + * For example, opacity: 0.5 makes overlay semitransparent. + */ + opacity?: number; + /** + * - If set to true, the overlay image will be tiled to cover the entire + * area. 
This is useful for stock-photo-like watermarks. + * - If set to "x", the overlay image will be tiled horizontally only + * (form a line). + * - If set to "y", the overlay image will be tiled vertically only + * (form a line). + */ + repeat?: true | "x" | "y"; + /** + * Position of the overlay image relative to a given edge. Each property is + * an offset in pixels. 0 aligns exactly to the edge. For example, left: 10 + * positions left side of the overlay 10 pixels from the left edge of the + * image it's drawn over. bottom: 0 aligns bottom of the overlay with bottom + * of the background image. + * + * Setting both left & right, or both top & bottom is an error. + * + * If no position is specified, the image will be centered. + */ + top?: number; + left?: number; + bottom?: number; + right?: number; +} +interface RequestInitCfPropertiesImage extends BasicImageTransformations { + /** + * Device Pixel Ratio. Default 1. Multiplier for width/height that makes it + * easier to specify higher-DPI sizes in . + */ + dpr?: number; + /** + * Allows you to trim your image. Takes dpr into account and is performed before + * resizing or rotation. + * + * It can be used as: + * - left, top, right, bottom - it will specify the number of pixels to cut + * off each side + * - width, height - the width/height you'd like to end up with - can be used + * in combination with the properties above + * - border - this will automatically trim the surroundings of an image based on + * it's color. 
It consists of three properties: + * - color: rgb or hex representation of the color you wish to trim (todo: verify the rgba bit) + * - tolerance: difference from color to treat as color + * - keep: the number of pixels of border to keep + */ + trim?: "border" | { + top?: number; + bottom?: number; + left?: number; + right?: number; + width?: number; + height?: number; + border?: boolean | { + color?: string; + tolerance?: number; + keep?: number; + }; + }; + /** + * Quality setting from 1-100 (useful values are in 60-90 range). Lower values + * make images look worse, but load faster. The default is 85. It applies only + * to JPEG and WebP images. It doesn’t have any effect on PNG. + */ + quality?: number | "low" | "medium-low" | "medium-high" | "high"; + /** + * Output format to generate. It can be: + * - avif: generate images in AVIF format. + * - webp: generate images in Google WebP format. Set quality to 100 to get + * the WebP-lossless format. + * - json: instead of generating an image, outputs information about the + * image, in JSON format. The JSON object will contain image size + * (before and after resizing), source image’s MIME type, file size, etc. + * - jpeg: generate images in JPEG format. + * - png: generate images in PNG format. + */ + format?: "avif" | "webp" | "json" | "jpeg" | "png" | "baseline-jpeg" | "png-force" | "svg"; + /** + * Whether to preserve animation frames from input files. Default is true. + * Setting it to false reduces animations to still images. This setting is + * recommended when enlarging images or processing arbitrary user content, + * because large GIF animations can weigh tens or even hundreds of megabytes. + * It is also useful to set anim:false when using format:"json" to get the + * response quicker without the number of frames. + */ + anim?: boolean; + /** + * What EXIF data should be preserved in the output image. 
Note that EXIF + * rotation and embedded color profiles are always applied ("baked in" into + * the image), and aren't affected by this option. Note that if the Polish + * feature is enabled, all metadata may have been removed already and this + * option may have no effect. + * - keep: Preserve most of EXIF metadata, including GPS location if there's + * any. + * - copyright: Only keep the copyright tag, and discard everything else. + * This is the default behavior for JPEG files. + * - none: Discard all invisible EXIF metadata. Currently WebP and PNG + * output formats always discard metadata. + */ + metadata?: "keep" | "copyright" | "none"; + /** + * Strength of sharpening filter to apply to the image. Floating-point + * number between 0 (no sharpening, default) and 10 (maximum). 1.0 is a + * recommended value for downscaled images. + */ + sharpen?: number; + /** + * Radius of a blur filter (approximate gaussian). Maximum supported radius + * is 250. + */ + blur?: number; + /** + * Overlays are drawn in the order they appear in the array (last array + * entry is the topmost layer). + */ + draw?: RequestInitCfPropertiesImageDraw[]; + /** + * Fetching image from authenticated origin. Setting this property will + * pass authentication headers (Authorization, Cookie, etc.) through to + * the origin. + */ + "origin-auth"?: "share-publicly"; + /** + * Adds a border around the image. The border is added after resizing. Border + * width takes dpr into account, and can be specified either using a single + * width property, or individually for each side. + */ + border?: { + color: string; + width: number; + } | { + color: string; + top: number; + right: number; + bottom: number; + left: number; + }; + /** + * Increase brightness by a factor. A value of 1.0 equals no change, a value + * of 0.5 equals half brightness, and a value of 2.0 equals twice as bright. + * 0 is ignored. + */ + brightness?: number; + /** + * Increase contrast by a factor. 
A value of 1.0 equals no change, a value of + * 0.5 equals low contrast, and a value of 2.0 equals high contrast. 0 is + * ignored. + */ + contrast?: number; + /** + * Increase exposure by a factor. A value of 1.0 equals no change, a value of + * 0.5 darkens the image, and a value of 2.0 lightens the image. 0 is ignored. + */ + gamma?: number; + /** + * Increase contrast by a factor. A value of 1.0 equals no change, a value of + * 0.5 equals low contrast, and a value of 2.0 equals high contrast. 0 is + * ignored. + */ + saturation?: number; + /** + * Flips the images horizontally, vertically, or both. Flipping is applied before + * rotation, so if you apply flip=h,rotate=90 then the image will be flipped + * horizontally, then rotated by 90 degrees. + */ + flip?: 'h' | 'v' | 'hv'; + /** + * Slightly reduces latency on a cache miss by selecting a + * quickest-to-compress file format, at a cost of increased file size and + * lower image quality. It will usually override the format option and choose + * JPEG over WebP or AVIF. We do not recommend using this option, except in + * unusual circumstances like resizing uncacheable dynamically-generated + * images. + */ + compression?: "fast"; +} +interface RequestInitCfPropertiesImageMinify { + javascript?: boolean; + css?: boolean; + html?: boolean; +} +interface RequestInitCfPropertiesR2 { + /** + * Colo id of bucket that an object is stored in + */ + bucketColoId?: number; +} +/** + * Request metadata provided by Cloudflare's edge. + */ +type IncomingRequestCfProperties = IncomingRequestCfPropertiesBase & IncomingRequestCfPropertiesBotManagementEnterprise & IncomingRequestCfPropertiesCloudflareForSaaSEnterprise & IncomingRequestCfPropertiesGeographicInformation & IncomingRequestCfPropertiesCloudflareAccessOrApiShield; +interface IncomingRequestCfPropertiesBase extends Record { + /** + * [ASN](https://www.iana.org/assignments/as-numbers/as-numbers.xhtml) of the incoming request. 
+ * + * @example 395747 + */ + asn?: number; + /** + * The organization which owns the ASN of the incoming request. + * + * @example "Google Cloud" + */ + asOrganization?: string; + /** + * The original value of the `Accept-Encoding` header if Cloudflare modified it. + * + * @example "gzip, deflate, br" + */ + clientAcceptEncoding?: string; + /** + * The number of milliseconds it took for the request to reach your worker. + * + * @example 22 + */ + clientTcpRtt?: number; + /** + * The three-letter [IATA](https://en.wikipedia.org/wiki/IATA_airport_code) + * airport code of the data center that the request hit. + * + * @example "DFW" + */ + colo: string; + /** + * Represents the upstream's response to a + * [TCP `keepalive` message](https://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html) + * from cloudflare. + * + * For workers with no upstream, this will always be `1`. + * + * @example 3 + */ + edgeRequestKeepAliveStatus: IncomingRequestCfPropertiesEdgeRequestKeepAliveStatus; + /** + * The HTTP Protocol the request used. + * + * @example "HTTP/2" + */ + httpProtocol: string; + /** + * The browser-requested prioritization information in the request object. + * + * If no information was set, defaults to the empty string `""` + * + * @example "weight=192;exclusive=0;group=3;group-weight=127" + * @default "" + */ + requestPriority: string; + /** + * The TLS version of the connection to Cloudflare. + * In requests served over plaintext (without TLS), this property is the empty string `""`. + * + * @example "TLSv1.3" + */ + tlsVersion: string; + /** + * The cipher for the connection to Cloudflare. + * In requests served over plaintext (without TLS), this property is the empty string `""`. + * + * @example "AEAD-AES128-GCM-SHA256" + */ + tlsCipher: string; + /** + * Metadata containing the [`HELLO`](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.2) and [`FINISHED`](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.9) messages from this request's TLS handshake. 
+ * + * If the incoming request was served over plaintext (without TLS) this field is undefined. + */ + tlsExportedAuthenticator?: IncomingRequestCfPropertiesExportedAuthenticatorMetadata; +} +interface IncomingRequestCfPropertiesBotManagementBase { + /** + * Cloudflare’s [level of certainty](https://developers.cloudflare.com/bots/concepts/bot-score/) that a request comes from a bot, + * represented as an integer percentage between `1` (almost certainly a bot) and `99` (almost certainly human). + * + * @example 54 + */ + score: number; + /** + * A boolean value that is true if the request comes from a good bot, like Google or Bing. + * Most customers choose to allow this traffic. For more details, see [Traffic from known bots](https://developers.cloudflare.com/firewall/known-issues-and-faq/#how-does-firewall-rules-handle-traffic-from-known-bots). + */ + verifiedBot: boolean; + /** + * A boolean value that is true if the request originates from a + * Cloudflare-verified proxy service. + */ + corporateProxy: boolean; + /** + * A boolean value that's true if the request matches [file extensions](https://developers.cloudflare.com/bots/reference/static-resources/) for many types of static resources. + */ + staticResource: boolean; + /** + * List of IDs that correlate to the Bot Management heuristic detections made on a request (you can have multiple heuristic detections on the same request). + */ + detectionIds: number[]; +} +interface IncomingRequestCfPropertiesBotManagement { + /** + * Results of Cloudflare's Bot Management analysis + */ + botManagement: IncomingRequestCfPropertiesBotManagementBase; + /** + * Duplicate of `botManagement.score`. 
+ * + * @deprecated + */ + clientTrustScore: number; +} +interface IncomingRequestCfPropertiesBotManagementEnterprise extends IncomingRequestCfPropertiesBotManagement { + /** + * Results of Cloudflare's Bot Management analysis + */ + botManagement: IncomingRequestCfPropertiesBotManagementBase & { + /** + * A [JA3 Fingerprint](https://developers.cloudflare.com/bots/concepts/ja3-fingerprint/) to help profile specific SSL/TLS clients + * across different destination IPs, Ports, and X509 certificates. + */ + ja3Hash: string; + }; +} +interface IncomingRequestCfPropertiesCloudflareForSaaSEnterprise { + /** + * Custom metadata set per-host in [Cloudflare for SaaS](https://developers.cloudflare.com/cloudflare-for-platforms/cloudflare-for-saas/). + * + * This field is only present if you have Cloudflare for SaaS enabled on your account + * and you have followed the [required steps to enable it]((https://developers.cloudflare.com/cloudflare-for-platforms/cloudflare-for-saas/domain-support/custom-metadata/)). + */ + hostMetadata?: HostMetadata; +} +interface IncomingRequestCfPropertiesCloudflareAccessOrApiShield { + /** + * Information about the client certificate presented to Cloudflare. + * + * This is populated when the incoming request is served over TLS using + * either Cloudflare Access or API Shield (mTLS) + * and the presented SSL certificate has a valid + * [Certificate Serial Number](https://ldapwiki.com/wiki/Certificate%20Serial%20Number) + * (i.e., not `null` or `""`). + * + * Otherwise, a set of placeholder values are used. + * + * The property `certPresented` will be set to `"1"` when + * the object is populated (i.e. the above conditions were met). 
+ */ + tlsClientAuth: IncomingRequestCfPropertiesTLSClientAuth | IncomingRequestCfPropertiesTLSClientAuthPlaceholder; +} +/** + * Metadata about the request's TLS handshake + */ +interface IncomingRequestCfPropertiesExportedAuthenticatorMetadata { + /** + * The client's [`HELLO` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.2), encoded in hexadecimal + * + * @example "44372ba35fa1270921d318f34c12f155dc87b682cf36a790cfaa3ba8737a1b5d" + */ + clientHandshake: string; + /** + * The server's [`HELLO` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.2), encoded in hexadecimal + * + * @example "44372ba35fa1270921d318f34c12f155dc87b682cf36a790cfaa3ba8737a1b5d" + */ + serverHandshake: string; + /** + * The client's [`FINISHED` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.9), encoded in hexadecimal + * + * @example "084ee802fe1348f688220e2a6040a05b2199a761f33cf753abb1b006792d3f8b" + */ + clientFinished: string; + /** + * The server's [`FINISHED` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.9), encoded in hexadecimal + * + * @example "084ee802fe1348f688220e2a6040a05b2199a761f33cf753abb1b006792d3f8b" + */ + serverFinished: string; +} +/** + * Geographic data about the request's origin. + */ +interface IncomingRequestCfPropertiesGeographicInformation { + /** + * The [ISO 3166-1 Alpha 2](https://www.iso.org/iso-3166-country-codes.html) country code the request originated from. + * + * If your worker is [configured to accept TOR connections](https://support.cloudflare.com/hc/en-us/articles/203306930-Understanding-Cloudflare-Tor-support-and-Onion-Routing), this may also be `"T1"`, indicating a request that originated over TOR. + * + * If Cloudflare is unable to determine where the request originated this property is omitted. + * + * The country code `"T1"` is used for requests originating on TOR. 
+ * + * @example "GB" + */ + country?: Iso3166Alpha2Code | "T1"; + /** + * If present, this property indicates that the request originated in the EU + * + * @example "1" + */ + isEUCountry?: "1"; + /** + * A two-letter code indicating the continent the request originated from. + * + * @example "AN" + */ + continent?: ContinentCode; + /** + * The city the request originated from + * + * @example "Austin" + */ + city?: string; + /** + * Postal code of the incoming request + * + * @example "78701" + */ + postalCode?: string; + /** + * Latitude of the incoming request + * + * @example "30.27130" + */ + latitude?: string; + /** + * Longitude of the incoming request + * + * @example "-97.74260" + */ + longitude?: string; + /** + * Timezone of the incoming request + * + * @example "America/Chicago" + */ + timezone?: string; + /** + * If known, the ISO 3166-2 name for the first level region associated with + * the IP address of the incoming request + * + * @example "Texas" + */ + region?: string; + /** + * If known, the ISO 3166-2 code for the first-level region associated with + * the IP address of the incoming request + * + * @example "TX" + */ + regionCode?: string; + /** + * Metro code (DMA) of the incoming request + * + * @example "635" + */ + metroCode?: string; +} +/** Data about the incoming request's TLS certificate */ +interface IncomingRequestCfPropertiesTLSClientAuth { + /** Always `"1"`, indicating that the certificate was presented */ + certPresented: "1"; + /** + * Result of certificate verification. + * + * @example "FAILED:self signed certificate" + */ + certVerified: Exclude; + /** The presented certificate's revokation status. 
+ * + * - A value of `"1"` indicates the certificate has been revoked + * - A value of `"0"` indicates the certificate has not been revoked + */ + certRevoked: "1" | "0"; + /** + * The certificate issuer's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html) + * + * @example "CN=cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare" + */ + certIssuerDN: string; + /** + * The certificate subject's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html) + * + * @example "CN=*.cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare" + */ + certSubjectDN: string; + /** + * The certificate issuer's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html) ([RFC 2253](https://www.rfc-editor.org/rfc/rfc2253.html) formatted) + * + * @example "CN=cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare" + */ + certIssuerDNRFC2253: string; + /** + * The certificate subject's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html) ([RFC 2253](https://www.rfc-editor.org/rfc/rfc2253.html) formatted) + * + * @example "CN=*.cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare" + */ + certSubjectDNRFC2253: string; + /** The certificate issuer's distinguished name (legacy policies) */ + certIssuerDNLegacy: string; + /** The certificate subject's distinguished name (legacy policies) */ + certSubjectDNLegacy: string; + /** + * The certificate's serial number + * + * @example "00936EACBE07F201DF" + */ + certSerial: string; + /** + * The certificate issuer's serial number + * + * @example "2489002934BDFEA34" + */ + certIssuerSerial: string; + /** + * The certificate's Subject Key Identifier + * + * @example "BB:AF:7E:02:3D:FA:A6:F1:3C:84:8E:AD:EE:38:98:EC:D9:32:32:D4" + */ + certSKI: string; + /** + * The certificate issuer's Subject Key Identifier + * + * @example "BB:AF:7E:02:3D:FA:A6:F1:3C:84:8E:AD:EE:38:98:EC:D9:32:32:D4" + */ + 
certIssuerSKI: string; + /** + * The certificate's SHA-1 fingerprint + * + * @example "6b9109f323999e52259cda7373ff0b4d26bd232e" + */ + certFingerprintSHA1: string; + /** + * The certificate's SHA-256 fingerprint + * + * @example "acf77cf37b4156a2708e34c4eb755f9b5dbbe5ebb55adfec8f11493438d19e6ad3f157f81fa3b98278453d5652b0c1fd1d71e5695ae4d709803a4d3f39de9dea" + */ + certFingerprintSHA256: string; + /** + * The effective starting date of the certificate + * + * @example "Dec 22 19:39:00 2018 GMT" + */ + certNotBefore: string; + /** + * The effective expiration date of the certificate + * + * @example "Dec 22 19:39:00 2018 GMT" + */ + certNotAfter: string; +} +/** Placeholder values for TLS Client Authorization */ +interface IncomingRequestCfPropertiesTLSClientAuthPlaceholder { + certPresented: "0"; + certVerified: "NONE"; + certRevoked: "0"; + certIssuerDN: ""; + certSubjectDN: ""; + certIssuerDNRFC2253: ""; + certSubjectDNRFC2253: ""; + certIssuerDNLegacy: ""; + certSubjectDNLegacy: ""; + certSerial: ""; + certIssuerSerial: ""; + certSKI: ""; + certIssuerSKI: ""; + certFingerprintSHA1: ""; + certFingerprintSHA256: ""; + certNotBefore: ""; + certNotAfter: ""; +} +/** Possible outcomes of TLS verification */ +declare type CertVerificationStatus = +/** Authentication succeeded */ +"SUCCESS" +/** No certificate was presented */ + | "NONE" +/** Failed because the certificate was self-signed */ + | "FAILED:self signed certificate" +/** Failed because the certificate failed a trust chain check */ + | "FAILED:unable to verify the first certificate" +/** Failed because the certificate not yet valid */ + | "FAILED:certificate is not yet valid" +/** Failed because the certificate is expired */ + | "FAILED:certificate has expired" +/** Failed for another unspecified reason */ + | "FAILED"; +/** + * An upstream endpoint's response to a TCP `keepalive` message from Cloudflare. 
+ */ +declare type IncomingRequestCfPropertiesEdgeRequestKeepAliveStatus = 0 /** Unknown */ | 1 /** no keepalives (not found) */ | 2 /** no connection re-use, opening keepalive connection failed */ | 3 /** no connection re-use, keepalive accepted and saved */ | 4 /** connection re-use, refused by the origin server (`TCP FIN`) */ | 5; /** connection re-use, accepted by the origin server */ +/** ISO 3166-1 Alpha-2 codes */ +declare type Iso3166Alpha2Code = "AD" | "AE" | "AF" | "AG" | "AI" | "AL" | "AM" | "AO" | "AQ" | "AR" | "AS" | "AT" | "AU" | "AW" | "AX" | "AZ" | "BA" | "BB" | "BD" | "BE" | "BF" | "BG" | "BH" | "BI" | "BJ" | "BL" | "BM" | "BN" | "BO" | "BQ" | "BR" | "BS" | "BT" | "BV" | "BW" | "BY" | "BZ" | "CA" | "CC" | "CD" | "CF" | "CG" | "CH" | "CI" | "CK" | "CL" | "CM" | "CN" | "CO" | "CR" | "CU" | "CV" | "CW" | "CX" | "CY" | "CZ" | "DE" | "DJ" | "DK" | "DM" | "DO" | "DZ" | "EC" | "EE" | "EG" | "EH" | "ER" | "ES" | "ET" | "FI" | "FJ" | "FK" | "FM" | "FO" | "FR" | "GA" | "GB" | "GD" | "GE" | "GF" | "GG" | "GH" | "GI" | "GL" | "GM" | "GN" | "GP" | "GQ" | "GR" | "GS" | "GT" | "GU" | "GW" | "GY" | "HK" | "HM" | "HN" | "HR" | "HT" | "HU" | "ID" | "IE" | "IL" | "IM" | "IN" | "IO" | "IQ" | "IR" | "IS" | "IT" | "JE" | "JM" | "JO" | "JP" | "KE" | "KG" | "KH" | "KI" | "KM" | "KN" | "KP" | "KR" | "KW" | "KY" | "KZ" | "LA" | "LB" | "LC" | "LI" | "LK" | "LR" | "LS" | "LT" | "LU" | "LV" | "LY" | "MA" | "MC" | "MD" | "ME" | "MF" | "MG" | "MH" | "MK" | "ML" | "MM" | "MN" | "MO" | "MP" | "MQ" | "MR" | "MS" | "MT" | "MU" | "MV" | "MW" | "MX" | "MY" | "MZ" | "NA" | "NC" | "NE" | "NF" | "NG" | "NI" | "NL" | "NO" | "NP" | "NR" | "NU" | "NZ" | "OM" | "PA" | "PE" | "PF" | "PG" | "PH" | "PK" | "PL" | "PM" | "PN" | "PR" | "PS" | "PT" | "PW" | "PY" | "QA" | "RE" | "RO" | "RS" | "RU" | "RW" | "SA" | "SB" | "SC" | "SD" | "SE" | "SG" | "SH" | "SI" | "SJ" | "SK" | "SL" | "SM" | "SN" | "SO" | "SR" | "SS" | "ST" | "SV" | "SX" | "SY" | "SZ" | "TC" | "TD" | "TF" | "TG" | "TH" | "TJ" | "TK" | 
"TL" | "TM" | "TN" | "TO" | "TR" | "TT" | "TV" | "TW" | "TZ" | "UA" | "UG" | "UM" | "US" | "UY" | "UZ" | "VA" | "VC" | "VE" | "VG" | "VI" | "VN" | "VU" | "WF" | "WS" | "YE" | "YT" | "ZA" | "ZM" | "ZW"; +/** The 2-letter continent codes Cloudflare uses */ +declare type ContinentCode = "AF" | "AN" | "AS" | "EU" | "NA" | "OC" | "SA"; +type CfProperties = IncomingRequestCfProperties | RequestInitCfProperties; +interface D1Meta { + duration: number; + size_after: number; + rows_read: number; + rows_written: number; + last_row_id: number; + changed_db: boolean; + changes: number; + /** + * The region of the database instance that executed the query. + */ + served_by_region?: string; + /** + * The three letters airport code of the colo that executed the query. + */ + served_by_colo?: string; + /** + * True if-and-only-if the database instance that executed the query was the primary. + */ + served_by_primary?: boolean; + timings?: { + /** + * The duration of the SQL query execution by the database instance. It doesn't include any network time. + */ + sql_duration_ms: number; + }; + /** + * Number of total attempts to execute the query, due to automatic retries. + * Note: All other fields in the response like `timings` only apply to the last attempt. + */ + total_attempts?: number; +} +interface D1Response { + success: true; + meta: D1Meta & Record; + error?: never; +} +type D1Result = D1Response & { + results: T[]; +}; +interface D1ExecResult { + count: number; + duration: number; +} +type D1SessionConstraint = +// Indicates that the first query should go to the primary, and the rest queries +// using the same D1DatabaseSession will go to any replica that is consistent with +// the bookmark maintained by the session (returned by the first query). 
+'first-primary' +// Indicates that the first query can go anywhere (primary or replica), and the rest queries +// using the same D1DatabaseSession will go to any replica that is consistent with +// the bookmark maintained by the session (returned by the first query). + | 'first-unconstrained'; +type D1SessionBookmark = string; +declare abstract class D1Database { + prepare(query: string): D1PreparedStatement; + batch(statements: D1PreparedStatement[]): Promise[]>; + exec(query: string): Promise; + /** + * Creates a new D1 Session anchored at the given constraint or the bookmark. + * All queries executed using the created session will have sequential consistency, + * meaning that all writes done through the session will be visible in subsequent reads. + * + * @param constraintOrBookmark Either the session constraint or the explicit bookmark to anchor the created session. + */ + withSession(constraintOrBookmark?: D1SessionBookmark | D1SessionConstraint): D1DatabaseSession; + /** + * @deprecated dump() will be removed soon, only applies to deprecated alpha v1 databases. + */ + dump(): Promise; +} +declare abstract class D1DatabaseSession { + prepare(query: string): D1PreparedStatement; + batch(statements: D1PreparedStatement[]): Promise[]>; + /** + * @returns The latest session bookmark across all executed queries on the session. + * If no query has been executed yet, `null` is returned. + */ + getBookmark(): D1SessionBookmark | null; +} +declare abstract class D1PreparedStatement { + bind(...values: unknown[]): D1PreparedStatement; + first(colName: string): Promise; + first>(): Promise; + run>(): Promise>; + all>(): Promise>; + raw(options: { + columnNames: true; + }): Promise<[ + string[], + ...T[] + ]>; + raw(options?: { + columnNames?: false; + }): Promise; +} +// `Disposable` was added to TypeScript's standard lib types in version 5.2. +// To support older TypeScript versions, define an empty `Disposable` interface. 
+// Users won't be able to use `using`/`Symbol.dispose` without upgrading to 5.2, +// but this will ensure type checking on older versions still passes. +// TypeScript's interface merging will ensure our empty interface is effectively +// ignored when `Disposable` is included in the standard lib. +interface Disposable { +} +/** + * The returned data after sending an email + */ +interface EmailSendResult { + /** + * The Email Message ID + */ + messageId: string; +} +/** + * An email message that can be sent from a Worker. + */ +interface EmailMessage { + /** + * Envelope From attribute of the email message. + */ + readonly from: string; + /** + * Envelope To attribute of the email message. + */ + readonly to: string; +} +/** + * An email message that is sent to a consumer Worker and can be rejected/forwarded. + */ +interface ForwardableEmailMessage extends EmailMessage { + /** + * Stream of the email message content. + */ + readonly raw: ReadableStream; + /** + * An [Headers object](https://developer.mozilla.org/en-US/docs/Web/API/Headers). + */ + readonly headers: Headers; + /** + * Size of the email message content. + */ + readonly rawSize: number; + /** + * Reject this email message by returning a permanent SMTP error back to the connecting client including the given reason. + * @param reason The reject reason. + * @returns void + */ + setReject(reason: string): void; + /** + * Forward this email message to a verified destination address of the account. + * @param rcptTo Verified destination address. + * @param headers A [Headers object](https://developer.mozilla.org/en-US/docs/Web/API/Headers). + * @returns A promise that resolves when the email message is forwarded. + */ + forward(rcptTo: string, headers?: Headers): Promise; + /** + * Reply to the sender of this email message with a new EmailMessage object. + * @param message The reply message. + * @returns A promise that resolves when the email message is replied. 
+ */ + reply(message: EmailMessage): Promise; +} +/** A file attachment for an email message */ +type EmailAttachment = { + disposition: 'inline'; + contentId: string; + filename: string; + type: string; + content: string | ArrayBuffer | ArrayBufferView; +} | { + disposition: 'attachment'; + contentId?: undefined; + filename: string; + type: string; + content: string | ArrayBuffer | ArrayBufferView; +}; +/** An Email Address */ +interface EmailAddress { + name: string; + email: string; +} +/** + * A binding that allows a Worker to send email messages. + */ +interface SendEmail { + send(message: EmailMessage): Promise; + send(builder: { + from: string | EmailAddress; + to: string | string[]; + subject: string; + replyTo?: string | EmailAddress; + cc?: string | string[]; + bcc?: string | string[]; + headers?: Record; + text?: string; + html?: string; + attachments?: EmailAttachment[]; + }): Promise; +} +declare abstract class EmailEvent extends ExtendableEvent { + readonly message: ForwardableEmailMessage; +} +declare type EmailExportedHandler = (message: ForwardableEmailMessage, env: Env, ctx: ExecutionContext) => void | Promise; +declare module "cloudflare:email" { + let _EmailMessage: { + prototype: EmailMessage; + new (from: string, to: string, raw: ReadableStream | string): EmailMessage; + }; + export { _EmailMessage as EmailMessage }; +} +/** + * Evaluation context for targeting rules. + * Keys are attribute names (e.g. "userId", "country"), values are the attribute values. + */ +type FlagshipEvaluationContext = Record; +interface FlagshipEvaluationDetails { + flagKey: string; + value: T; + variant?: string | undefined; + reason?: string | undefined; + errorCode?: string | undefined; + errorMessage?: string | undefined; +} +interface FlagshipEvaluationError extends Error { +} +/** + * Feature flags binding for evaluating feature flags from a Cloudflare Workers script. 
+ * + * @example + * ```typescript + * // Get a boolean flag value with a default + * const enabled = await env.FLAGS.getBooleanValue('my-feature', false); + * + * // Get a flag value with evaluation context for targeting + * const variant = await env.FLAGS.getStringValue('experiment', 'control', { + * userId: 'user-123', + * country: 'US', + * }); + * + * // Get full evaluation details including variant and reason + * const details = await env.FLAGS.getBooleanDetails('my-feature', false); + * console.log(details.variant, details.reason); + * ``` + */ +declare abstract class Flagship { + /** + * Get a flag value without type checking. + * @param flagKey The key of the flag to evaluate. + * @param defaultValue Optional default value returned when evaluation fails. + * @param context Optional evaluation context for targeting rules. + */ + get(flagKey: string, defaultValue?: unknown, context?: FlagshipEvaluationContext): Promise; + /** + * Get a boolean flag value. + * @param flagKey The key of the flag to evaluate. + * @param defaultValue Default value returned when evaluation fails or the flag type does not match. + * @param context Optional evaluation context for targeting rules. + */ + getBooleanValue(flagKey: string, defaultValue: boolean, context?: FlagshipEvaluationContext): Promise; + /** + * Get a string flag value. + * @param flagKey The key of the flag to evaluate. + * @param defaultValue Default value returned when evaluation fails or the flag type does not match. + * @param context Optional evaluation context for targeting rules. + */ + getStringValue(flagKey: string, defaultValue: string, context?: FlagshipEvaluationContext): Promise; + /** + * Get a number flag value. + * @param flagKey The key of the flag to evaluate. + * @param defaultValue Default value returned when evaluation fails or the flag type does not match. + * @param context Optional evaluation context for targeting rules. 
+ */ + getNumberValue(flagKey: string, defaultValue: number, context?: FlagshipEvaluationContext): Promise; + /** + * Get an object flag value. + * @param flagKey The key of the flag to evaluate. + * @param defaultValue Default value returned when evaluation fails or the flag type does not match. + * @param context Optional evaluation context for targeting rules. + */ + getObjectValue(flagKey: string, defaultValue: T, context?: FlagshipEvaluationContext): Promise; + /** + * Get a boolean flag value with full evaluation details. + * @param flagKey The key of the flag to evaluate. + * @param defaultValue Default value returned when evaluation fails or the flag type does not match. + * @param context Optional evaluation context for targeting rules. + */ + getBooleanDetails(flagKey: string, defaultValue: boolean, context?: FlagshipEvaluationContext): Promise>; + /** + * Get a string flag value with full evaluation details. + * @param flagKey The key of the flag to evaluate. + * @param defaultValue Default value returned when evaluation fails or the flag type does not match. + * @param context Optional evaluation context for targeting rules. + */ + getStringDetails(flagKey: string, defaultValue: string, context?: FlagshipEvaluationContext): Promise>; + /** + * Get a number flag value with full evaluation details. + * @param flagKey The key of the flag to evaluate. + * @param defaultValue Default value returned when evaluation fails or the flag type does not match. + * @param context Optional evaluation context for targeting rules. + */ + getNumberDetails(flagKey: string, defaultValue: number, context?: FlagshipEvaluationContext): Promise>; + /** + * Get an object flag value with full evaluation details. + * @param flagKey The key of the flag to evaluate. + * @param defaultValue Default value returned when evaluation fails or the flag type does not match. + * @param context Optional evaluation context for targeting rules. 
+ */ + getObjectDetails(flagKey: string, defaultValue: T, context?: FlagshipEvaluationContext): Promise>; +} +/** + * Hello World binding to serve as an explanatory example. DO NOT USE + */ +interface HelloWorldBinding { + /** + * Retrieve the current stored value + */ + get(): Promise<{ + value: string; + ms?: number; + }>; + /** + * Set a new stored value + */ + set(value: string): Promise; +} +interface Hyperdrive { + /** + * Connect directly to Hyperdrive as if it's your database, returning a TCP socket. + * + * Calling this method returns an identical socket to if you call + * `connect("host:port")` using the `host` and `port` fields from this object. + * Pick whichever approach works better with your preferred DB client library. + * + * Note that this socket is not yet authenticated -- it's expected that your + * code (or preferably, the client library of your choice) will authenticate + * using the information in this class's readonly fields. + */ + connect(): Socket; + /** + * A valid DB connection string that can be passed straight into the typical + * client library/driver/ORM. This will typically be the easiest way to use + * Hyperdrive. + */ + readonly connectionString: string; + /* + * A randomly generated hostname that is only valid within the context of the + * currently running Worker which, when passed into `connect()` function from + * the "cloudflare:sockets" module, will connect to the Hyperdrive instance + * for your database. + */ + readonly host: string; + /* + * The port that must be paired the the host field when connecting. + */ + readonly port: number; + /* + * The username to use when authenticating to your database via Hyperdrive. + * Unlike the host and password, this will be the same every time + */ + readonly user: string; + /* + * The randomly generated password to use when authenticating to your + * database via Hyperdrive. 
Like the host field, this password is only valid + * within the context of the currently running Worker instance from which + * it's read. + */ + readonly password: string; + /* + * The name of the database to connect to. + */ + readonly database: string; +} +// Copyright (c) 2024 Cloudflare, Inc. +// Licensed under the Apache 2.0 license found in the LICENSE file or at: +// https://opensource.org/licenses/Apache-2.0 +type ImageInfoResponse = { + format: 'image/svg+xml'; +} | { + format: string; + fileSize: number; + width: number; + height: number; +}; +type ImageTransform = { + width?: number; + height?: number; + background?: string; + blur?: number; + border?: { + color?: string; + width?: number; + } | { + top?: number; + bottom?: number; + left?: number; + right?: number; + }; + brightness?: number; + contrast?: number; + fit?: 'scale-down' | 'contain' | 'pad' | 'squeeze' | 'cover' | 'crop'; + flip?: 'h' | 'v' | 'hv'; + gamma?: number; + segment?: 'foreground'; + gravity?: 'face' | 'left' | 'right' | 'top' | 'bottom' | 'center' | 'auto' | 'entropy' | { + x?: number; + y?: number; + mode: 'remainder' | 'box-center'; + }; + rotate?: 0 | 90 | 180 | 270; + saturation?: number; + sharpen?: number; + trim?: 'border' | { + top?: number; + bottom?: number; + left?: number; + right?: number; + width?: number; + height?: number; + border?: boolean | { + color?: string; + tolerance?: number; + keep?: number; + }; + }; +}; +type ImageDrawOptions = { + opacity?: number; + repeat?: boolean | string; + top?: number; + left?: number; + bottom?: number; + right?: number; +}; +type ImageInputOptions = { + encoding?: 'base64'; +}; +type ImageOutputOptions = { + format: 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp' | 'image/avif' | 'rgb' | 'rgba'; + quality?: number; + background?: string; + anim?: boolean; +}; +interface ImageMetadata { + id: string; + filename?: string; + uploaded?: string; + requireSignedURLs: boolean; + meta?: Record; + variants: string[]; + 
draft?: boolean; + creator?: string; +} +interface ImageUploadOptions { + id?: string; + filename?: string; + requireSignedURLs?: boolean; + metadata?: Record; + creator?: string; + encoding?: 'base64'; +} +interface ImageUpdateOptions { + requireSignedURLs?: boolean; + metadata?: Record; + creator?: string; +} +interface ImageListOptions { + limit?: number; + cursor?: string; + sortOrder?: 'asc' | 'desc'; + creator?: string; +} +interface ImageList { + images: ImageMetadata[]; + cursor?: string; + listComplete: boolean; +} +interface ImageHandle { + /** + * Get metadata for a hosted image + * @returns Image metadata, or null if not found + */ + details(): Promise; + /** + * Get the raw image data for a hosted image + * @returns ReadableStream of image bytes, or null if not found + */ + bytes(): Promise | null>; + /** + * Update hosted image metadata + * @param options Properties to update + * @returns Updated image metadata + * @throws {@link ImagesError} if update fails + */ + update(options: ImageUpdateOptions): Promise; + /** + * Delete a hosted image + * @returns True if deleted, false if not found + */ + delete(): Promise; +} +interface HostedImagesBinding { + /** + * Get a handle for a hosted image + * @param imageId The ID of the image (UUID or custom ID) + * @returns A handle for per-image operations + */ + image(imageId: string): ImageHandle; + /** + * Upload a new hosted image + * @param image The image file to upload + * @param options Upload configuration + * @returns Metadata for the uploaded image + * @throws {@link ImagesError} if upload fails + */ + upload(image: ReadableStream | ArrayBuffer, options?: ImageUploadOptions): Promise; + /** + * List hosted images with pagination + * @param options List configuration + * @returns List of images with pagination info + * @throws {@link ImagesError} if list fails + */ + list(options?: ImageListOptions): Promise; +} +interface ImagesBinding { + /** + * Get image metadata (type, width and height) + * 
@throws {@link ImagesError} with code 9412 if input is not an image + * @param stream The image bytes + */ + info(stream: ReadableStream, options?: ImageInputOptions): Promise; + /** + * Begin applying a series of transformations to an image + * @param stream The image bytes + * @returns A transform handle + */ + input(stream: ReadableStream, options?: ImageInputOptions): ImageTransformer; + /** + * Access hosted images CRUD operations + */ + readonly hosted: HostedImagesBinding; +} +interface ImageTransformer { + /** + * Apply transform next, returning a transform handle. + * You can then apply more transformations, draw, or retrieve the output. + * @param transform + */ + transform(transform: ImageTransform): ImageTransformer; + /** + * Draw an image on this transformer, returning a transform handle. + * You can then apply more transformations, draw, or retrieve the output. + * @param image The image (or transformer that will give the image) to draw + * @param options The options configuring how to draw the image + */ + draw(image: ReadableStream | ImageTransformer, options?: ImageDrawOptions): ImageTransformer; + /** + * Retrieve the image that results from applying the transforms to the + * provided input + * @param options Options that apply to the output e.g. output format + */ + output(options: ImageOutputOptions): Promise; +} +type ImageTransformationOutputOptions = { + encoding?: 'base64'; +}; +interface ImageTransformationResult { + /** + * The image as a response, ready to store in cache or return to users + */ + response(): Response; + /** + * The content type of the returned image + */ + contentType(): string; + /** + * The bytes of the response + */ + image(options?: ImageTransformationOutputOptions): ReadableStream; +} +interface ImagesError extends Error { + readonly code: number; + readonly message: string; + readonly stack?: string; +} +/** + * Media binding for transforming media streams. 
+ * Provides the entry point for media transformation operations. + */ +interface MediaBinding { + /** + * Creates a media transformer from an input stream. + * @param media - The input media bytes + * @returns A MediaTransformer instance for applying transformations + */ + input(media: ReadableStream): MediaTransformer; +} +/** + * Media transformer for applying transformation operations to media content. + * Handles sizing, fitting, and other input transformation parameters. + */ +interface MediaTransformer { + /** + * Applies transformation options to the media content. + * @param transform - Configuration for how the media should be transformed + * @returns A generator for producing the transformed media output + */ + transform(transform?: MediaTransformationInputOptions): MediaTransformationGenerator; + /** + * Generates the final media output with specified options. + * @param output - Configuration for the output format and parameters + * @returns The final transformation result containing the transformed media + */ + output(output?: MediaTransformationOutputOptions): MediaTransformationResult; +} +/** + * Generator for producing media transformation results. + * Configures the output format and parameters for the transformed media. + */ +interface MediaTransformationGenerator { + /** + * Generates the final media output with specified options. + * @param output - Configuration for the output format and parameters + * @returns The final transformation result containing the transformed media + */ + output(output?: MediaTransformationOutputOptions): MediaTransformationResult; +} +/** + * Result of a media transformation operation. + * Provides multiple ways to access the transformed media content. + */ +interface MediaTransformationResult { + /** + * Returns the transformed media as a readable stream of bytes. 
+ * @returns A promise containing a readable stream with the transformed media + */ + media(): Promise>; + /** + * Returns the transformed media as an HTTP response object. + * @returns The transformed media as a Promise, ready to store in cache or return to users + */ + response(): Promise; + /** + * Returns the MIME type of the transformed media. + * @returns A promise containing the content type string (e.g., 'image/jpeg', 'video/mp4') + */ + contentType(): Promise; +} +/** + * Configuration options for transforming media input. + * Controls how the media should be resized and fitted. + */ +type MediaTransformationInputOptions = { + /** How the media should be resized to fit the specified dimensions */ + fit?: 'contain' | 'cover' | 'scale-down'; + /** Target width in pixels */ + width?: number; + /** Target height in pixels */ + height?: number; +}; +/** + * Configuration options for Media Transformations output. + * Controls the format, timing, and type of the generated output. + */ +type MediaTransformationOutputOptions = { + /** + * Output mode determining the type of media to generate + */ + mode?: 'video' | 'spritesheet' | 'frame' | 'audio'; + /** Whether to include audio in the output */ + audio?: boolean; + /** + * Starting timestamp for frame extraction or start time for clips. (e.g. '2s'). + */ + time?: string; + /** + * Duration for video clips, audio extraction, and spritesheet generation (e.g. '5s'). + */ + duration?: string; + /** + * Number of frames in the spritesheet. + */ + imageCount?: number; + /** + * Output format for the generated media. + */ + format?: 'jpg' | 'png' | 'm4a'; +}; +/** + * Error object for media transformation operations. + * Extends the standard Error interface with additional media-specific information. 
+ */ +interface MediaError extends Error { + readonly code: number; + readonly message: string; + readonly stack?: string; +} +declare module 'cloudflare:node' { + interface NodeStyleServer { + listen(...args: unknown[]): this; + address(): { + port?: number | null | undefined; + }; + } + export function httpServerHandler(port: number): ExportedHandler; + export function httpServerHandler(options: { + port: number; + }): ExportedHandler; + export function httpServerHandler(server: NodeStyleServer): ExportedHandler; +} +type Params

= Record; +type EventContext = { + request: Request>; + functionPath: string; + waitUntil: (promise: Promise) => void; + passThroughOnException: () => void; + next: (input?: Request | string, init?: RequestInit) => Promise; + env: Env & { + ASSETS: { + fetch: typeof fetch; + }; + }; + params: Params

; + data: Data; +}; +type PagesFunction = Record> = (context: EventContext) => Response | Promise; +type EventPluginContext = { + request: Request>; + functionPath: string; + waitUntil: (promise: Promise) => void; + passThroughOnException: () => void; + next: (input?: Request | string, init?: RequestInit) => Promise; + env: Env & { + ASSETS: { + fetch: typeof fetch; + }; + }; + params: Params

; + data: Data; + pluginArgs: PluginArgs; +}; +type PagesPluginFunction = Record, PluginArgs = unknown> = (context: EventPluginContext) => Response | Promise; +declare module "assets:*" { + export const onRequest: PagesFunction; +} +// Copyright (c) 2022-2023 Cloudflare, Inc. +// Licensed under the Apache 2.0 license found in the LICENSE file or at: +// https://opensource.org/licenses/Apache-2.0 +declare module "cloudflare:pipelines" { + export abstract class PipelineTransformationEntrypoint { + protected env: Env; + protected ctx: ExecutionContext; + constructor(ctx: ExecutionContext, env: Env); + /** + * run receives an array of PipelineRecord which can be + * transformed and returned to the pipeline + * @param records Incoming records from the pipeline to be transformed + * @param metadata Information about the specific pipeline calling the transformation entrypoint + * @returns A promise containing the transformed PipelineRecord array + */ + public run(records: I[], metadata: PipelineBatchMetadata): Promise; + } + export type PipelineRecord = Record; + export type PipelineBatchMetadata = { + pipelineId: string; + pipelineName: string; + }; + export interface Pipeline { + /** + * The Pipeline interface represents the type of a binding to a Pipeline + * + * @param records The records to send to the pipeline + */ + send(records: T[]): Promise; + } +} +// PubSubMessage represents an incoming PubSub message. +// The message includes metadata about the broker, the client, and the payload +// itself. +// https://developers.cloudflare.com/pub-sub/ +interface PubSubMessage { + // Message ID + readonly mid: number; + // MQTT broker FQDN in the form mqtts://BROKER.NAMESPACE.cloudflarepubsub.com:PORT + readonly broker: string; + // The MQTT topic the message was sent on. + readonly topic: string; + // The client ID of the client that published this message. 
+ readonly clientId: string; + // The unique identifier (JWT ID) used by the client to authenticate, if token + // auth was used. + readonly jti?: string; + // A Unix timestamp (seconds from Jan 1, 1970), set when the Pub/Sub Broker + // received the message from the client. + readonly receivedAt: number; + // An (optional) string with the MIME type of the payload, if set by the + // client. + readonly contentType: string; + // Set to 1 when the payload is a UTF-8 string + // https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901063 + readonly payloadFormatIndicator: number; + // Pub/Sub (MQTT) payloads can be UTF-8 strings, or byte arrays. + // You can use payloadFormatIndicator to inspect this before decoding. + payload: string | Uint8Array; +} +// JsonWebKey extended by kid parameter +interface JsonWebKeyWithKid extends JsonWebKey { + // Key Identifier of the JWK + readonly kid: string; +} +interface RateLimitOptions { + key: string; +} +interface RateLimitOutcome { + success: boolean; +} +interface RateLimit { + /** + * Rate limit a request based on the provided options. + * @see https://developers.cloudflare.com/workers/runtime-apis/bindings/rate-limit/ + * @returns A promise that resolves with the outcome of the rate limit. + */ + limit(options: RateLimitOptions): Promise; +} +// Namespace for RPC utility types. Unfortunately, we can't use a `module` here as these types need +// to referenced by `Fetcher`. This is included in the "importable" version of the types which +// strips all `module` blocks. +declare namespace Rpc { + // Branded types for identifying `WorkerEntrypoint`/`DurableObject`/`Target`s. + // TypeScript uses *structural* typing meaning anything with the same shape as type `T` is a `T`. + // For the classes exported by `cloudflare:workers` we want *nominal* typing (i.e. 
we only want to + // accept `WorkerEntrypoint` from `cloudflare:workers`, not any other class with the same shape) + export const __RPC_STUB_BRAND: '__RPC_STUB_BRAND'; + export const __RPC_TARGET_BRAND: '__RPC_TARGET_BRAND'; + export const __WORKER_ENTRYPOINT_BRAND: '__WORKER_ENTRYPOINT_BRAND'; + export const __DURABLE_OBJECT_BRAND: '__DURABLE_OBJECT_BRAND'; + export const __WORKFLOW_ENTRYPOINT_BRAND: '__WORKFLOW_ENTRYPOINT_BRAND'; + export interface RpcTargetBranded { + [__RPC_TARGET_BRAND]: never; + } + export interface WorkerEntrypointBranded { + [__WORKER_ENTRYPOINT_BRAND]: never; + } + export interface DurableObjectBranded { + [__DURABLE_OBJECT_BRAND]: never; + } + export interface WorkflowEntrypointBranded { + [__WORKFLOW_ENTRYPOINT_BRAND]: never; + } + export type EntrypointBranded = WorkerEntrypointBranded | DurableObjectBranded | WorkflowEntrypointBranded; + // Types that can be used through `Stub`s + export type Stubable = RpcTargetBranded | ((...args: any[]) => any); + // Types that can be passed over RPC + // The reason for using a generic type here is to build a serializable subset of structured + // cloneable composite types. This allows types defined with the "interface" keyword to pass the + // serializable check as well. Otherwise, only types defined with the "type" keyword would pass. + type Serializable = + // Structured cloneables + BaseType + // Structured cloneable composites + | Map ? Serializable : never, T extends Map ? Serializable : never> | Set ? Serializable : never> | ReadonlyArray ? Serializable : never> | { + [K in keyof T]: K extends number | string ? Serializable : never; + } + // Special types + | Stub + // Serialized as stubs, see `Stubify` + | Stubable; + // Base type for all RPC stubs, including common memory management methods. + // `T` is used as a marker type for unwrapping `Stub`s later. 
+ interface StubBase extends Disposable { + [__RPC_STUB_BRAND]: T; + dup(): this; + } + export type Stub = Provider & StubBase; + // This represents all the types that can be sent as-is over an RPC boundary + type BaseType = void | undefined | null | boolean | number | bigint | string | TypedArray | ArrayBuffer | DataView | Date | Error | RegExp | ReadableStream | WritableStream | Request | Response | Headers; + // Recursively rewrite all `Stubable` types with `Stub`s + // prettier-ignore + type Stubify = T extends Stubable ? Stub : T extends Map ? Map, Stubify> : T extends Set ? Set> : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> : T extends BaseType ? T : T extends { + [key: string | number]: any; + } ? { + [K in keyof T]: Stubify; + } : T; + // Recursively rewrite all `Stub`s with the corresponding `T`s. + // Note we use `StubBase` instead of `Stub` here to avoid circular dependencies: + // `Stub` depends on `Provider`, which depends on `Unstubify`, which would depend on `Stub`. + // prettier-ignore + type Unstubify = T extends StubBase ? V : T extends Map ? Map, Unstubify> : T extends Set ? Set> : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> : T extends BaseType ? T : T extends { + [key: string | number]: unknown; + } ? { + [K in keyof T]: Unstubify; + } : T; + type UnstubifyAll = { + [I in keyof A]: Unstubify; + }; + // Utility type for adding `Provider`/`Disposable`s to `object` types only. + // Note `unknown & T` is equivalent to `T`. + type MaybeProvider = T extends object ? Provider : unknown; + type MaybeDisposable = T extends object ? Disposable : unknown; + // Type for method return or property on an RPC interface. + // - Stubable types are replaced by stubs. + // - Serializable types are passed by value, with stubable types replaced by stubs + // and a top-level `Disposer`. + // Everything else can't be passed over PRC. + // Technically, we use custom thenables here, but they quack like `Promise`s. 
+ // Intersecting with `(Maybe)Provider` allows pipelining. + // prettier-ignore + type Result = R extends Stubable ? Promise> & Provider : R extends Serializable ? Promise & MaybeDisposable> & MaybeProvider : never; + // Type for method or property on an RPC interface. + // For methods, unwrap `Stub`s in parameters, and rewrite returns to be `Result`s. + // Unwrapping `Stub`s allows calling with `Stubable` arguments. + // For properties, rewrite types to be `Result`s. + // In each case, unwrap `Promise`s. + type MethodOrProperty = V extends (...args: infer P) => infer R ? (...args: UnstubifyAll

) => Result> : Result>; + // Type for the callable part of an `Provider` if `T` is callable. + // This is intersected with methods/properties. + type MaybeCallableProvider = T extends (...args: any[]) => any ? MethodOrProperty : unknown; + // Base type for all other types providing RPC-like interfaces. + // Rewrites all methods/properties to be `MethodOrProperty`s, while preserving callable types. + // `Reserved` names (e.g. stub method names like `dup()`) and symbols can't be accessed over RPC. + export type Provider = MaybeCallableProvider & Pick<{ + [K in keyof T]: MethodOrProperty; + }, Exclude>>; +} +declare namespace Cloudflare { + // Type of `env`. + // + // The specific project can extend `Env` by redeclaring it in project-specific files. Typescript + // will merge all declarations. + // + // You can use `wrangler types` to generate the `Env` type automatically. + interface Env { + } + // Project-specific parameters used to inform types. + // + // This interface is, again, intended to be declared in project-specific files, and then that + // declaration will be merged with this one. + // + // A project should have a declaration like this: + // + // interface GlobalProps { + // // Declares the main module's exports. Used to populate Cloudflare.Exports aka the type + // // of `ctx.exports`. + // mainModule: typeof import("my-main-module"); + // + // // Declares which of the main module's exports are configured with durable storage, and + // // thus should behave as Durable Object namsepace bindings. + // durableNamespaces: "MyDurableObject" | "AnotherDurableObject"; + // } + // + // You can use `wrangler types` to generate `GlobalProps` automatically. + interface GlobalProps { + } + // Evaluates to the type of a property in GlobalProps, defaulting to `Default` if it is not + // present. + type GlobalProp = K extends keyof GlobalProps ? GlobalProps[K] : Default; + // The type of the program's main module exports, if known. 
Requires `GlobalProps` to declare the + // `mainModule` property. + type MainModule = GlobalProp<"mainModule", {}>; + // The type of ctx.exports, which contains loopback bindings for all top-level exports. + type Exports = { + [K in keyof MainModule]: LoopbackForExport + // If the export is listed in `durableNamespaces`, then it is also a + // DurableObjectNamespace. + & (K extends GlobalProp<"durableNamespaces", never> ? MainModule[K] extends new (...args: any[]) => infer DoInstance ? DoInstance extends Rpc.DurableObjectBranded ? DurableObjectNamespace : DurableObjectNamespace : DurableObjectNamespace : {}); + }; +} +declare namespace CloudflareWorkersModule { + export type RpcStub = Rpc.Stub; + export const RpcStub: { + new (value: T): Rpc.Stub; + }; + export abstract class RpcTarget implements Rpc.RpcTargetBranded { + [Rpc.__RPC_TARGET_BRAND]: never; + } + // `protected` fields don't appear in `keyof`s, so can't be accessed over RPC + export abstract class WorkerEntrypoint implements Rpc.WorkerEntrypointBranded { + [Rpc.__WORKER_ENTRYPOINT_BRAND]: never; + protected ctx: ExecutionContext; + protected env: Env; + constructor(ctx: ExecutionContext, env: Env); + email?(message: ForwardableEmailMessage): void | Promise; + fetch?(request: Request): Response | Promise; + connect?(socket: Socket): void | Promise; + queue?(batch: MessageBatch): void | Promise; + scheduled?(controller: ScheduledController): void | Promise; + tail?(events: TraceItem[]): void | Promise; + tailStream?(event: TailStream.TailEvent): TailStream.TailEventHandlerType | Promise; + test?(controller: TestController): void | Promise; + trace?(traces: TraceItem[]): void | Promise; + } + export abstract class DurableObject implements Rpc.DurableObjectBranded { + [Rpc.__DURABLE_OBJECT_BRAND]: never; + protected ctx: DurableObjectState; + protected env: Env; + constructor(ctx: DurableObjectState, env: Env); + alarm?(alarmInfo?: AlarmInvocationInfo): void | Promise; + fetch?(request: Request): Response | 
Promise; + connect?(socket: Socket): void | Promise; + webSocketMessage?(ws: WebSocket, message: string | ArrayBuffer): void | Promise; + webSocketClose?(ws: WebSocket, code: number, reason: string, wasClean: boolean): void | Promise; + webSocketError?(ws: WebSocket, error: unknown): void | Promise; + } + export type WorkflowDurationLabel = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'year'; + export type WorkflowSleepDuration = `${number} ${WorkflowDurationLabel}${'s' | ''}` | number; + export type WorkflowDelayDuration = WorkflowSleepDuration; + export type WorkflowTimeoutDuration = WorkflowSleepDuration; + export type WorkflowRetentionDuration = WorkflowSleepDuration; + export type WorkflowBackoff = 'constant' | 'linear' | 'exponential'; + export type WorkflowStepConfig = { + retries?: { + limit: number; + delay: WorkflowDelayDuration | number; + backoff?: WorkflowBackoff; + }; + timeout?: WorkflowTimeoutDuration | number; + }; + export type WorkflowEvent = { + payload: Readonly; + timestamp: Date; + instanceId: string; + }; + export type WorkflowStepEvent = { + payload: Readonly; + timestamp: Date; + type: string; + }; + export type WorkflowStepContext = { + step: { + name: string; + count: number; + }; + attempt: number; + config: WorkflowStepConfig; + }; + export abstract class WorkflowStep { + do>(name: string, callback: (ctx: WorkflowStepContext) => Promise): Promise; + do>(name: string, config: WorkflowStepConfig, callback: (ctx: WorkflowStepContext) => Promise): Promise; + sleep: (name: string, duration: WorkflowSleepDuration) => Promise; + sleepUntil: (name: string, timestamp: Date | number) => Promise; + waitForEvent>(name: string, options: { + type: string; + timeout?: WorkflowTimeoutDuration | number; + }): Promise>; + } + export type WorkflowInstanceStatus = 'queued' | 'running' | 'paused' | 'errored' | 'terminated' | 'complete' | 'waiting' | 'waitingForPause' | 'unknown'; + export abstract class WorkflowEntrypoint | unknown = unknown> 
implements Rpc.WorkflowEntrypointBranded { + [Rpc.__WORKFLOW_ENTRYPOINT_BRAND]: never; + protected ctx: ExecutionContext; + protected env: Env; + constructor(ctx: ExecutionContext, env: Env); + run(event: Readonly>, step: WorkflowStep): Promise; + } + export function waitUntil(promise: Promise): void; + export function withEnv(newEnv: unknown, fn: () => unknown): unknown; + export function withExports(newExports: unknown, fn: () => unknown): unknown; + export function withEnvAndExports(newEnv: unknown, newExports: unknown, fn: () => unknown): unknown; + export const env: Cloudflare.Env; + export const exports: Cloudflare.Exports; + export const cache: CacheContext; + export const tracing: Tracing; +} +declare module 'cloudflare:workers' { + export = CloudflareWorkersModule; +} +interface SecretsStoreSecret { + /** + * Get a secret from the Secrets Store, returning a string of the secret value + * if it exists, or throws an error if it does not exist + */ + get(): Promise; +} +declare module "cloudflare:sockets" { + function _connect(address: string | SocketAddress, options?: SocketOptions): Socket; + export { _connect as connect }; +} +/** + * Binding entrypoint for Cloudflare Stream. + * + * Usage: + * - Binding-level operations: + * `await env.STREAM.videos.upload` + * `await env.STREAM.videos.createDirectUpload` + * `await env.STREAM.videos.*` + * `await env.STREAM.watermarks.*` + * - Per-video operations: + * `await env.STREAM.video(id).downloads.*` + * `await env.STREAM.video(id).captions.*` + * + * Example usage: + * ```ts + * await env.STREAM.video(id).downloads.generate(); + * + * const video = env.STREAM.video(id) + * const captions = video.captions.list(); + * const videoDetails = video.details() + * ``` + */ +interface StreamBinding { + /** + * Returns a handle scoped to a single video for per-video operations. + * @param id The unique identifier for the video. + * @returns A handle for per-video operations. 
+ */ + video(id: string): StreamVideoHandle; + /** + * Uploads a new video from a provided URL. + * @param url The URL to upload from. + * @param params Optional upload parameters. + * @returns The uploaded video details. + * @throws {BadRequestError} if the upload parameter is invalid or the URL is invalid + * @throws {QuotaReachedError} if the account storage capacity is exceeded + * @throws {MaxFileSizeError} if the file size is too large + * @throws {RateLimitedError} if the server received too many requests + * @throws {AlreadyUploadedError} if a video was already uploaded to this URL + * @throws {InternalError} if an unexpected error occurs + */ + upload(url: string, params?: StreamUrlUploadParams): Promise; + /** + * Creates a direct upload that allows video uploads without an API key. + * @param params Parameters for the direct upload + * @returns The direct upload details. + * @throws {BadRequestError} if the parameters are invalid + * @throws {RateLimitedError} if the server received too many requests + * @throws {InternalError} if an unexpected error occurs + */ + createDirectUpload(params: StreamDirectUploadCreateParams): Promise; + videos: StreamVideos; + watermarks: StreamWatermarks; +} +/** + * Handle for operations scoped to a single Stream video. + */ +interface StreamVideoHandle { + /** + * The unique identifier for the video. + */ + id: string; + /** + * Get a full videos details + * @returns The full video details. + * @throws {NotFoundError} if the video is not found + * @throws {InternalError} if an unexpected error occurs + */ + details(): Promise; + /** + * Update details for a single video. + * @param params The fields to update for the video. + * @returns The updated video details. 
+ * @throws {NotFoundError} if the video is not found + * @throws {BadRequestError} if the parameters are invalid + * @throws {InternalError} if an unexpected error occurs + */ + update(params: StreamUpdateVideoParams): Promise; + /** + * Deletes a video and its copies from Cloudflare Stream. + * @returns A promise that resolves when deletion completes. + * @throws {NotFoundError} if the video is not found + * @throws {InternalError} if an unexpected error occurs + */ + delete(): Promise; + /** + * Creates a signed URL token for a video. + * @returns The signed token that was created. + * @throws {InternalError} if the signing key cannot be retrieved or the token cannot be signed + */ + generateToken(): Promise; + downloads: StreamScopedDownloads; + captions: StreamScopedCaptions; +} +interface StreamVideo { + /** + * The unique identifier for the video. + */ + id: string; + /** + * A user-defined identifier for the media creator. + */ + creator: string | null; + /** + * The thumbnail URL for the video. + */ + thumbnail: string; + /** + * The thumbnail timestamp percentage. + */ + thumbnailTimestampPct: number; + /** + * Indicates whether the video is ready to stream. + */ + readyToStream: boolean; + /** + * The date and time the video became ready to stream. + */ + readyToStreamAt: string | null; + /** + * Processing status information. + */ + status: StreamVideoStatus; + /** + * A user modifiable key-value store. + */ + meta: Record; + /** + * The date and time the video was created. + */ + created: string; + /** + * The date and time the video was last modified. + */ + modified: string; + /** + * The date and time at which the video will be deleted. + */ + scheduledDeletion: string | null; + /** + * The size of the video in bytes. + */ + size: number; + /** + * The preview URL for the video. + */ + preview?: string; + /** + * Origins allowed to display the video. + */ + allowedOrigins: Array; + /** + * Indicates whether signed URLs are required. 
+ */ + requireSignedURLs: boolean | null; + /** + * The date and time the video was uploaded. + */ + uploaded: string | null; + /** + * The date and time when the upload URL expires. + */ + uploadExpiry: string | null; + /** + * The maximum size in bytes for direct uploads. + */ + maxSizeBytes: number | null; + /** + * The maximum duration in seconds for direct uploads. + */ + maxDurationSeconds: number | null; + /** + * The video duration in seconds. -1 indicates unknown. + */ + duration: number; + /** + * Input metadata for the original upload. + */ + input: StreamVideoInput; + /** + * Playback URLs for the video. + */ + hlsPlaybackUrl: string; + dashPlaybackUrl: string; + /** + * The watermark applied to the video, if any. + */ + watermark: StreamWatermark | null; + /** + * The live input id associated with the video, if any. + */ + liveInputId?: string | null; + /** + * The source video id if this is a clip. + */ + clippedFromId: string | null; + /** + * Public details associated with the video. + */ + publicDetails: StreamPublicDetails | null; +} +type StreamVideoStatus = { + /** + * The current processing state. + */ + state: string; + /** + * The current processing step. + */ + step?: string; + /** + * The percent complete as a string. + */ + pctComplete?: string; + /** + * An error reason code, if applicable. + */ + errorReasonCode: string; + /** + * An error reason text, if applicable. + */ + errorReasonText: string; +}; +type StreamVideoInput = { + /** + * The input width in pixels. + */ + width: number; + /** + * The input height in pixels. + */ + height: number; +}; +type StreamPublicDetails = { + /** + * The public title for the video. + */ + title: string | null; + /** + * The public share link. + */ + share_link: string | null; + /** + * The public channel link. + */ + channel_link: string | null; + /** + * The public logo URL. 
+ */ + logo: string | null; +}; +type StreamDirectUpload = { + /** + * The URL an unauthenticated upload can use for a single multipart request. + */ + uploadURL: string; + /** + * A Cloudflare-generated unique identifier for a media item. + */ + id: string; + /** + * The watermark profile applied to the upload. + */ + watermark: StreamWatermark | null; + /** + * The scheduled deletion time, if any. + */ + scheduledDeletion: string | null; +}; +type StreamDirectUploadCreateParams = { + /** + * The maximum duration in seconds for a video upload. + */ + maxDurationSeconds: number; + /** + * The date and time after upload when videos will not be accepted. + */ + expiry?: string; + /** + * A user-defined identifier for the media creator. + */ + creator?: string; + /** + * A user modifiable key-value store used to reference other systems of record for + * managing videos. + */ + meta?: Record; + /** + * Lists the origins allowed to display the video. + */ + allowedOrigins?: Array; + /** + * Indicates whether the video can be accessed using the id. When set to `true`, + * a signed token must be generated with a signing key to view the video. + */ + requireSignedURLs?: boolean; + /** + * The thumbnail timestamp percentage. + */ + thumbnailTimestampPct?: number; + /** + * The date and time at which the video will be deleted. Include `null` to remove + * a scheduled deletion. + */ + scheduledDeletion?: string | null; + /** + * The watermark profile to apply. + */ + watermark?: StreamDirectUploadWatermark; +}; +type StreamDirectUploadWatermark = { + /** + * The unique identifier for the watermark profile. + */ + id: string; +}; +type StreamUrlUploadParams = { + /** + * Lists the origins allowed to display the video. Enter allowed origin + * domains in an array and use `*` for wildcard subdomains. Empty arrays allow the + * video to be viewed on any origin. + */ + allowedOrigins?: Array; + /** + * A user-defined identifier for the media creator. 
+ */ + creator?: string; + /** + * A user modifiable key-value store used to reference other systems of + * record for managing videos. + */ + meta?: Record; + /** + * Indicates whether the video can be a accessed using the id. When + * set to `true`, a signed token must be generated with a signing key to view the + * video. + */ + requireSignedURLs?: boolean; + /** + * Indicates the date and time at which the video will be deleted. Omit + * the field to indicate no change, or include with a `null` value to remove an + * existing scheduled deletion. If specified, must be at least 30 days from upload + * time. + */ + scheduledDeletion?: string | null; + /** + * The timestamp for a thumbnail image calculated as a percentage value + * of the video's duration. To convert from a second-wise timestamp to a + * percentage, divide the desired timestamp by the total duration of the video. If + * this value is not set, the default thumbnail image is taken from 0s of the + * video. + */ + thumbnailTimestampPct?: number; + /** + * The identifier for the watermark profile + */ + watermarkId?: string; +}; +interface StreamScopedCaptions { + /** + * Uploads the caption or subtitle file to the endpoint for a specific BCP47 language. + * One caption or subtitle file per language is allowed. + * @param language The BCP 47 language tag for the caption or subtitle. + * @param input The caption or subtitle stream to upload. + * @returns The created caption entry. + * @throws {NotFoundError} if the video is not found + * @throws {BadRequestError} if the language or file is invalid + * @throws {InternalError} if an unexpected error occurs + */ + upload(language: string, input: ReadableStream): Promise; + /** + * Generate captions or subtitles for the provided language via AI. + * @param language The BCP 47 language tag to generate. + * @returns The generated caption entry. 
+ * @throws {NotFoundError} if the video is not found + * @throws {BadRequestError} if the language is invalid + * @throws {StreamError} if a generated caption already exists + * @throws {StreamError} if the video duration is too long + * @throws {StreamError} if the video is missing audio + * @throws {StreamError} if the requested language is not supported + * @throws {InternalError} if an unexpected error occurs + */ + generate(language: string): Promise; + /** + * Lists the captions or subtitles. + * Use the language parameter to filter by a specific language. + * @param language The optional BCP 47 language tag to filter by. + * @returns The list of captions or subtitles. + * @throws {NotFoundError} if the video or caption is not found + * @throws {InternalError} if an unexpected error occurs + */ + list(language?: string): Promise; + /** + * Removes the captions or subtitles from a video. + * @param language The BCP 47 language tag to remove. + * @returns A promise that resolves when deletion completes. + * @throws {NotFoundError} if the video or caption is not found + * @throws {InternalError} if an unexpected error occurs + */ + delete(language: string): Promise; +} +interface StreamScopedDownloads { + /** + * Generates a download for a video when a video is ready to view. Available + * types are `default` and `audio`. Defaults to `default` when omitted. + * @param downloadType The download type to create. + * @returns The current downloads for the video. + * @throws {NotFoundError} if the video is not found + * @throws {BadRequestError} if the download type is invalid + * @throws {StreamError} if the video duration is too long to generate a download + * @throws {StreamError} if the video is not ready to stream + * @throws {InternalError} if an unexpected error occurs + */ + generate(downloadType?: StreamDownloadType): Promise; + /** + * Lists the downloads created for a video. + * @returns The current downloads for the video. 
+ * @throws {NotFoundError} if the video or downloads are not found + * @throws {InternalError} if an unexpected error occurs + */ + get(): Promise; + /** + * Delete the downloads for a video. Available types are `default` and `audio`. + * Defaults to `default` when omitted. + * @param downloadType The download type to delete. + * @returns A promise that resolves when deletion completes. + * @throws {NotFoundError} if the video or downloads are not found + * @throws {InternalError} if an unexpected error occurs + */ + delete(downloadType?: StreamDownloadType): Promise; +} +interface StreamVideos { + /** + * Lists all videos in a users account. + * @returns The list of videos. + * @throws {BadRequestError} if the parameters are invalid + * @throws {InternalError} if an unexpected error occurs + */ + list(params?: StreamVideosListParams): Promise; +} +interface StreamWatermarks { + /** + * Generate a new watermark profile + * @param input The image stream to upload + * @param params The watermark creation parameters. + * @returns The created watermark profile. + * @throws {BadRequestError} if the parameters are invalid + * @throws {InvalidURLError} if the URL is invalid + * @throws {TooManyWatermarksError} if the number of allowed watermarks is reached + * @throws {InternalError} if an unexpected error occurs + */ + generate(input: ReadableStream, params: StreamWatermarkCreateParams): Promise; + /** + * Generate a new watermark profile + * @param url The image url to upload + * @param params The watermark creation parameters. + * @returns The created watermark profile. + * @throws {BadRequestError} if the parameters are invalid + * @throws {InvalidURLError} if the URL is invalid + * @throws {TooManyWatermarksError} if the number of allowed watermarks is reached + * @throws {InternalError} if an unexpected error occurs + */ + generate(url: string, params: StreamWatermarkCreateParams): Promise; + /** + * Lists all watermark profiles for an account. 
+ * @returns The list of watermark profiles. + * @throws {InternalError} if an unexpected error occurs + */ + list(): Promise; + /** + * Retrieves details for a single watermark profile. + * @param watermarkId The watermark profile identifier. + * @returns The watermark profile details. + * @throws {NotFoundError} if the watermark is not found + * @throws {InternalError} if an unexpected error occurs + */ + get(watermarkId: string): Promise; + /** + * Deletes a watermark profile. + * @param watermarkId The watermark profile identifier. + * @returns A promise that resolves when deletion completes. + * @throws {NotFoundError} if the watermark is not found + * @throws {InternalError} if an unexpected error occurs + */ + delete(watermarkId: string): Promise; +} +type StreamUpdateVideoParams = { + /** + * Lists the origins allowed to display the video. Enter allowed origin + * domains in an array and use `*` for wildcard subdomains. Empty arrays allow the + * video to be viewed on any origin. + */ + allowedOrigins?: Array; + /** + * A user-defined identifier for the media creator. + */ + creator?: string; + /** + * The maximum duration in seconds for a video upload. Can be set for a + * video that is not yet uploaded to limit its duration. Uploads that exceed the + * specified duration will fail during processing. A value of `-1` means the value + * is unknown. + */ + maxDurationSeconds?: number; + /** + * A user modifiable key-value store used to reference other systems of + * record for managing videos. + */ + meta?: Record; + /** + * Indicates whether the video can be a accessed using the id. When + * set to `true`, a signed token must be generated with a signing key to view the + * video. + */ + requireSignedURLs?: boolean; + /** + * Indicates the date and time at which the video will be deleted. Omit + * the field to indicate no change, or include with a `null` value to remove an + * existing scheduled deletion. 
If specified, must be at least 30 days from upload + * time. + */ + scheduledDeletion?: string | null; + /** + * The timestamp for a thumbnail image calculated as a percentage value + * of the video's duration. To convert from a second-wise timestamp to a + * percentage, divide the desired timestamp by the total duration of the video. If + * this value is not set, the default thumbnail image is taken from 0s of the + * video. + */ + thumbnailTimestampPct?: number; +}; +type StreamCaption = { + /** + * Whether the caption was generated via AI. + */ + generated?: boolean; + /** + * The language label displayed in the native language to users. + */ + label: string; + /** + * The language tag in BCP 47 format. + */ + language: string; + /** + * The status of a generated caption. + */ + status?: 'ready' | 'inprogress' | 'error'; +}; +type StreamDownloadStatus = 'ready' | 'inprogress' | 'error'; +type StreamDownloadType = 'default' | 'audio'; +type StreamDownload = { + /** + * Indicates the progress as a percentage between 0 and 100. + */ + percentComplete: number; + /** + * The status of a generated download. + */ + status: StreamDownloadStatus; + /** + * The URL to access the generated download. + */ + url?: string; +}; +/** + * An object with download type keys. Each key is optional and only present if that + * download type has been created. + */ +type StreamDownloadGetResponse = { + /** + * The audio-only download. Only present if this download type has been created. + */ + audio?: StreamDownload; + /** + * The default video download. Only present if this download type has been created. + */ + default?: StreamDownload; +}; +type StreamWatermarkPosition = 'upperRight' | 'upperLeft' | 'lowerLeft' | 'lowerRight' | 'center'; +type StreamWatermark = { + /** + * The unique identifier for a watermark profile. + */ + id: string; + /** + * The size of the image in bytes. + */ + size: number; + /** + * The height of the image in pixels. 
+ */ + height: number; + /** + * The width of the image in pixels. + */ + width: number; + /** + * The date and a time a watermark profile was created. + */ + created: string; + /** + * The source URL for a downloaded image. If the watermark profile was created via + * direct upload, this field is null. + */ + downloadedFrom: string | null; + /** + * A short description of the watermark profile. + */ + name: string; + /** + * The translucency of the image. A value of `0.0` makes the image completely + * transparent, and `1.0` makes the image completely opaque. Note that if the image + * is already semi-transparent, setting this to `1.0` will not make the image + * completely opaque. + */ + opacity: number; + /** + * The whitespace between the adjacent edges (determined by position) of the video + * and the image. `0.0` indicates no padding, and `1.0` indicates a fully padded + * video width or length, as determined by the algorithm. + */ + padding: number; + /** + * The size of the image relative to the overall size of the video. This parameter + * will adapt to horizontal and vertical videos automatically. `0.0` indicates no + * scaling (use the size of the image as-is), and `1.0 `fills the entire video. + */ + scale: number; + /** + * The location of the image. Valid positions are: `upperRight`, `upperLeft`, + * `lowerLeft`, `lowerRight`, and `center`. Note that `center` ignores the + * `padding` parameter. + */ + position: StreamWatermarkPosition; +}; +type StreamWatermarkCreateParams = { + /** + * A short description of the watermark profile. + */ + name?: string; + /** + * The translucency of the image. A value of `0.0` makes the image completely + * transparent, and `1.0` makes the image completely opaque. Note that if the + * image is already semi-transparent, setting this to `1.0` will not make the + * image completely opaque. 
+ */ + opacity?: number; + /** + * The whitespace between the adjacent edges (determined by position) of the + * video and the image. `0.0` indicates no padding, and `1.0` indicates a fully + * padded video width or length, as determined by the algorithm. + */ + padding?: number; + /** + * The size of the image relative to the overall size of the video. This + * parameter will adapt to horizontal and vertical videos automatically. `0.0` + * indicates no scaling (use the size of the image as-is), and `1.0 `fills the + * entire video. + */ + scale?: number; + /** + * The location of the image. + */ + position?: StreamWatermarkPosition; +}; +type StreamVideosListParams = { + /** + * The maximum number of videos to return. + */ + limit?: number; + /** + * Return videos created before this timestamp. + * (RFC3339/RFC3339Nano) + */ + before?: string; + /** + * Comparison operator for the `before` field. + * @default 'lt' + */ + beforeComp?: StreamPaginationComparison; + /** + * Return videos created after this timestamp. + * (RFC3339/RFC3339Nano) + */ + after?: string; + /** + * Comparison operator for the `after` field. + * @default 'gte' + */ + afterComp?: StreamPaginationComparison; +}; +type StreamPaginationComparison = 'eq' | 'gt' | 'gte' | 'lt' | 'lte'; +/** + * Error object for Stream binding operations. 
+ */ +interface StreamError extends Error { + readonly code: number; + readonly statusCode: number; + readonly message: string; + readonly stack?: string; +} +interface InternalError extends StreamError { + name: 'InternalError'; +} +interface BadRequestError extends StreamError { + name: 'BadRequestError'; +} +interface NotFoundError extends StreamError { + name: 'NotFoundError'; +} +interface ForbiddenError extends StreamError { + name: 'ForbiddenError'; +} +interface RateLimitedError extends StreamError { + name: 'RateLimitedError'; +} +interface QuotaReachedError extends StreamError { + name: 'QuotaReachedError'; +} +interface MaxFileSizeError extends StreamError { + name: 'MaxFileSizeError'; +} +interface InvalidURLError extends StreamError { + name: 'InvalidURLError'; +} +interface AlreadyUploadedError extends StreamError { + name: 'AlreadyUploadedError'; +} +interface TooManyWatermarksError extends StreamError { + name: 'TooManyWatermarksError'; +} +type MarkdownDocument = { + name: string; + blob: Blob; +}; +type ConversionResponse = { + id: string; + name: string; + mimeType: string; + format: 'markdown'; + tokens: number; + data: string; +} | { + id: string; + name: string; + mimeType: string; + format: 'error'; + error: string; +}; +type ImageConversionOptions = { + descriptionLanguage?: 'en' | 'es' | 'fr' | 'it' | 'pt' | 'de'; +}; +type EmbeddedImageConversionOptions = ImageConversionOptions & { + convert?: boolean; + maxConvertedImages?: number; +}; +type ConversionOptions = { + html?: { + images?: EmbeddedImageConversionOptions & { + convertOGImage?: boolean; + }; + hostname?: string; + cssSelector?: string; + }; + docx?: { + images?: EmbeddedImageConversionOptions; + }; + image?: ImageConversionOptions; + pdf?: { + images?: EmbeddedImageConversionOptions; + metadata?: boolean; + }; +}; +type ConversionRequestOptions = { + gateway?: GatewayOptions; + extraHeaders?: object; + conversionOptions?: ConversionOptions; +}; +type SupportedFileFormat = { + 
mimeType: string; + extension: string; +}; +declare abstract class ToMarkdownService { + transform(files: MarkdownDocument[], options?: ConversionRequestOptions): Promise; + transform(files: MarkdownDocument, options?: ConversionRequestOptions): Promise; + supported(): Promise; +} +declare namespace TailStream { + interface Header { + readonly name: string; + readonly value: string; + } + interface FetchEventInfo { + readonly type: "fetch"; + readonly method: string; + readonly url: string; + readonly cfJson?: object; + readonly headers: Header[]; + } + interface JsRpcEventInfo { + readonly type: "jsrpc"; + } + interface ScheduledEventInfo { + readonly type: "scheduled"; + readonly scheduledTime: Date; + readonly cron: string; + } + interface AlarmEventInfo { + readonly type: "alarm"; + readonly scheduledTime: Date; + } + interface QueueEventInfo { + readonly type: "queue"; + readonly queueName: string; + readonly batchSize: number; + } + interface EmailEventInfo { + readonly type: "email"; + readonly mailFrom: string; + readonly rcptTo: string; + readonly rawSize: number; + } + interface TraceEventInfo { + readonly type: "trace"; + readonly traces: (string | null)[]; + } + interface HibernatableWebSocketEventInfoMessage { + readonly type: "message"; + } + interface HibernatableWebSocketEventInfoError { + readonly type: "error"; + } + interface HibernatableWebSocketEventInfoClose { + readonly type: "close"; + readonly code: number; + readonly wasClean: boolean; + } + interface HibernatableWebSocketEventInfo { + readonly type: "hibernatableWebSocket"; + readonly info: HibernatableWebSocketEventInfoClose | HibernatableWebSocketEventInfoError | HibernatableWebSocketEventInfoMessage; + } + interface CustomEventInfo { + readonly type: "custom"; + } + interface FetchResponseInfo { + readonly type: "fetch"; + readonly statusCode: number; + } + interface ConnectEventInfo { + readonly type: "connect"; + } + type EventOutcome = "ok" | "canceled" | "exception" | "unknown" | 
"killSwitch" | "daemonDown" | "exceededCpu" | "exceededMemory" | "loadShed" | "responseStreamDisconnected" | "scriptNotFound" | "internalError"; + interface ScriptVersion { + readonly id: string; + readonly tag?: string; + readonly message?: string; + } + interface TracePreviewInfo { + readonly id: string; + readonly slug: string; + readonly name: string; + } + interface Onset { + readonly type: "onset"; + readonly attributes: Attribute[]; + // id for the span being opened by this Onset event. + readonly spanId: string; + readonly dispatchNamespace?: string; + readonly entrypoint?: string; + readonly executionModel: string; + readonly scriptName?: string; + readonly scriptTags?: string[]; + readonly scriptVersion?: ScriptVersion; + readonly preview?: TracePreviewInfo; + readonly info: FetchEventInfo | ConnectEventInfo | JsRpcEventInfo | ScheduledEventInfo | AlarmEventInfo | QueueEventInfo | EmailEventInfo | TraceEventInfo | HibernatableWebSocketEventInfo | CustomEventInfo; + } + interface Outcome { + readonly type: "outcome"; + readonly outcome: EventOutcome; + readonly cpuTime: number; + readonly wallTime: number; + } + interface SpanOpen { + readonly type: "spanOpen"; + readonly name: string; + // id for the span being opened by this SpanOpen event. 
+ readonly spanId: string; + readonly info?: FetchEventInfo | JsRpcEventInfo | Attributes; + } + interface SpanClose { + readonly type: "spanClose"; + readonly outcome: EventOutcome; + } + interface DiagnosticChannelEvent { + readonly type: "diagnosticChannel"; + readonly channel: string; + readonly message: any; + } + interface Exception { + readonly type: "exception"; + readonly name: string; + readonly message: string; + readonly stack?: string; + } + interface Log { + readonly type: "log"; + readonly level: "debug" | "error" | "info" | "log" | "warn"; + readonly message: object; + } + interface DroppedEventsDiagnostic { + readonly diagnosticsType: "droppedEvents"; + readonly count: number; + } + interface StreamDiagnostic { + readonly type: 'streamDiagnostic'; + // To add new diagnostic types, define a new interface and add it to this union type. + readonly diagnostic: DroppedEventsDiagnostic; + } + // This marks the worker handler return information. + // This is separate from Outcome because the worker invocation can live for a long time after + // returning. For example - Websockets that return an http upgrade response but then continue + // streaming information or SSE http connections. + interface Return { + readonly type: "return"; + readonly info?: FetchResponseInfo; + } + interface Attribute { + readonly name: string; + readonly value: string | string[] | boolean | boolean[] | number | number[] | bigint | bigint[]; + } + interface Attributes { + readonly type: "attributes"; + readonly info: Attribute[]; + } + type EventType = Onset | Outcome | SpanOpen | SpanClose | DiagnosticChannelEvent | Exception | Log | StreamDiagnostic | Return | Attributes; + // Context in which this trace event lives. 
+ interface SpanContext { + // Single id for the entire top-level invocation + // This should be a new traceId for the first worker stage invoked in the eyeball request and then + // same-account service-bindings should reuse the same traceId but cross-account service-bindings + // should use a new traceId. + readonly traceId: string; + // spanId in which this event is handled + // for Onset and SpanOpen events this would be the parent span id + // for Outcome and SpanClose these this would be the span id of the opening Onset and SpanOpen events + // For Hibernate and Mark this would be the span under which they were emitted. + // spanId is not set ONLY if: + // 1. This is an Onset event + // 2. We are not inheriting any SpanContext. (e.g. this is a cross-account service binding or a new top-level invocation) + readonly spanId?: string; + } + interface TailEvent { + // invocation id of the currently invoked worker stage. + // invocation id will always be unique to every Onset event and will be the same until the Outcome event. + readonly invocationId: string; + // Inherited spanContext for this event. + readonly spanContext: SpanContext; + readonly timestamp: Date; + readonly sequence: number; + readonly event: Event; + } + type TailEventHandler = (event: TailEvent) => void | Promise; + type TailEventHandlerObject = { + outcome?: TailEventHandler; + spanOpen?: TailEventHandler; + spanClose?: TailEventHandler; + diagnosticChannel?: TailEventHandler; + exception?: TailEventHandler; + log?: TailEventHandler; + return?: TailEventHandler; + attributes?: TailEventHandler; + }; + type TailEventHandlerType = TailEventHandler | TailEventHandlerObject; +} +// Copyright (c) 2022-2023 Cloudflare, Inc. +// Licensed under the Apache 2.0 license found in the LICENSE file or at: +// https://opensource.org/licenses/Apache-2.0 +/** + * Data types supported for holding vector metadata. 
+ */ +type VectorizeVectorMetadataValue = string | number | boolean | string[]; +/** + * Additional information to associate with a vector. + */ +type VectorizeVectorMetadata = VectorizeVectorMetadataValue | Record; +type VectorFloatArray = Float32Array | Float64Array; +interface VectorizeError { + code?: number; + error: string; +} +/** + * Comparison logic/operation to use for metadata filtering. + * + * This list is expected to grow as support for more operations are released. + */ +type VectorizeVectorMetadataFilterOp = '$eq' | '$ne' | '$lt' | '$lte' | '$gt' | '$gte'; +type VectorizeVectorMetadataFilterCollectionOp = '$in' | '$nin'; +/** + * Filter criteria for vector metadata used to limit the retrieved query result set. + */ +type VectorizeVectorMetadataFilter = { + [field: string]: Exclude | null | { + [Op in VectorizeVectorMetadataFilterOp]?: Exclude | null; + } | { + [Op in VectorizeVectorMetadataFilterCollectionOp]?: Exclude[]; + }; +}; +/** + * Supported distance metrics for an index. + * Distance metrics determine how other "similar" vectors are determined. + */ +type VectorizeDistanceMetric = "euclidean" | "cosine" | "dot-product"; +/** + * Metadata return levels for a Vectorize query. + * + * Default to "none". + * + * @property all Full metadata for the vector return set, including all fields (including those un-indexed) without truncation. This is a more expensive retrieval, as it requires additional fetching & reading of un-indexed data. + * @property indexed Return all metadata fields configured for indexing in the vector return set. This level of retrieval is "free" in that no additional overhead is incurred returning this data. However, note that indexed metadata is subject to truncation (especially for larger strings). + * @property none No indexed metadata will be returned. 
+ */ +type VectorizeMetadataRetrievalLevel = "all" | "indexed" | "none"; +interface VectorizeQueryOptions { + topK?: number; + namespace?: string; + returnValues?: boolean; + returnMetadata?: boolean | VectorizeMetadataRetrievalLevel; + filter?: VectorizeVectorMetadataFilter; +} +/** + * Information about the configuration of an index. + */ +type VectorizeIndexConfig = { + dimensions: number; + metric: VectorizeDistanceMetric; +} | { + preset: string; // keep this generic, as we'll be adding more presets in the future and this is only in a read capacity +}; +/** + * Metadata about an existing index. + * + * This type is exclusively for the Vectorize **beta** and will be deprecated once Vectorize RC is released. + * See {@link VectorizeIndexInfo} for its post-beta equivalent. + */ +interface VectorizeIndexDetails { + /** The unique ID of the index */ + readonly id: string; + /** The name of the index. */ + name: string; + /** (optional) A human readable description for the index. */ + description?: string; + /** The index configuration, including the dimension size and distance metric. */ + config: VectorizeIndexConfig; + /** The number of records containing vectors within the index. */ + vectorsCount: number; +} +/** + * Metadata about an existing index. + */ +interface VectorizeIndexInfo { + /** The number of records containing vectors within the index. */ + vectorCount: number; + /** Number of dimensions the index has been configured for. */ + dimensions: number; + /** ISO 8601 datetime of the last processed mutation on in the index. All changes before this mutation will be reflected in the index state. */ + processedUpToDatetime: number; + /** UUIDv4 of the last mutation processed by the index. All changes before this mutation will be reflected in the index state. */ + processedUpToMutation: number; +} +/** + * Represents a single vector value set along with its associated metadata. + */ +interface VectorizeVector { + /** The ID for the vector. 
This can be user-defined, and must be unique. It should uniquely identify the object, and is best set based on the ID of what the vector represents. */ + id: string; + /** The vector values */ + values: VectorFloatArray | number[]; + /** The namespace this vector belongs to. */ + namespace?: string; + /** Metadata associated with the vector. Includes the values of other fields and potentially additional details. */ + metadata?: Record; +} +/** + * Represents a matched vector for a query along with its score and (if specified) the matching vector information. + */ +type VectorizeMatch = Pick, "values"> & Omit & { + /** The score or rank for similarity, when returned as a result */ + score: number; +}; +/** + * A set of matching {@link VectorizeMatch} for a particular query. + */ +interface VectorizeMatches { + matches: VectorizeMatch[]; + count: number; +} +/** + * Results of an operation that performed a mutation on a set of vectors. + * Here, `ids` is a list of vectors that were successfully processed. + * + * This type is exclusively for the Vectorize **beta** and will be deprecated once Vectorize RC is released. + * See {@link VectorizeAsyncMutation} for its post-beta equivalent. + */ +interface VectorizeVectorMutation { + /* List of ids of vectors that were successfully processed. */ + ids: string[]; + /* Total count of the number of processed vectors. */ + count: number; +} +/** + * Result type indicating a mutation on the Vectorize Index. + * Actual mutations are processed async where the `mutationId` is the unique identifier for the operation. + */ +interface VectorizeAsyncMutation { + /** The unique identifier for the async mutation operation containing the changeset. */ + mutationId: string; +} +/** + * A Vectorize Vector Search Index for querying vectors/embeddings. + * + * This type is exclusively for the Vectorize **beta** and will be deprecated once Vectorize RC is released. + * See {@link Vectorize} for its new implementation. 
+ */ +declare abstract class VectorizeIndex { + /** + * Get information about the currently bound index. + * @returns A promise that resolves with information about the current index. + */ + public describe(): Promise; + /** + * Use the provided vector to perform a similarity search across the index. + * @param vector Input vector that will be used to drive the similarity search. + * @param options Configuration options to massage the returned data. + * @returns A promise that resolves with matched and scored vectors. + */ + public query(vector: VectorFloatArray | number[], options?: VectorizeQueryOptions): Promise; + /** + * Insert a list of vectors into the index dataset. If a provided id exists, an error will be thrown. + * @param vectors List of vectors that will be inserted. + * @returns A promise that resolves with the ids & count of records that were successfully processed. + */ + public insert(vectors: VectorizeVector[]): Promise; + /** + * Upsert a list of vectors into the index dataset. If a provided id exists, it will be replaced with the new values. + * @param vectors List of vectors that will be upserted. + * @returns A promise that resolves with the ids & count of records that were successfully processed. + */ + public upsert(vectors: VectorizeVector[]): Promise; + /** + * Delete a list of vectors with a matching id. + * @param ids List of vector ids that should be deleted. + * @returns A promise that resolves with the ids & count of records that were successfully processed (and thus deleted). + */ + public deleteByIds(ids: string[]): Promise; + /** + * Get a list of vectors with a matching id. + * @param ids List of vector ids that should be returned. + * @returns A promise that resolves with the raw unscored vectors matching the id set. + */ + public getByIds(ids: string[]): Promise; +} +/** + * A Vectorize Vector Search Index for querying vectors/embeddings. + * + * Mutations in this version are async, returning a mutation id. 
+ */ +declare abstract class Vectorize { + /** + * Get information about the currently bound index. + * @returns A promise that resolves with information about the current index. + */ + public describe(): Promise; + /** + * Use the provided vector to perform a similarity search across the index. + * @param vector Input vector that will be used to drive the similarity search. + * @param options Configuration options to massage the returned data. + * @returns A promise that resolves with matched and scored vectors. + */ + public query(vector: VectorFloatArray | number[], options?: VectorizeQueryOptions): Promise; + /** + * Use the provided vector-id to perform a similarity search across the index. + * @param vectorId Id for a vector in the index against which the index should be queried. + * @param options Configuration options to massage the returned data. + * @returns A promise that resolves with matched and scored vectors. + */ + public queryById(vectorId: string, options?: VectorizeQueryOptions): Promise; + /** + * Insert a list of vectors into the index dataset. If a provided id exists, an error will be thrown. + * @param vectors List of vectors that will be inserted. + * @returns A promise that resolves with a unique identifier of a mutation containing the insert changeset. + */ + public insert(vectors: VectorizeVector[]): Promise; + /** + * Upsert a list of vectors into the index dataset. If a provided id exists, it will be replaced with the new values. + * @param vectors List of vectors that will be upserted. + * @returns A promise that resolves with a unique identifier of a mutation containing the upsert changeset. + */ + public upsert(vectors: VectorizeVector[]): Promise; + /** + * Delete a list of vectors with a matching id. + * @param ids List of vector ids that should be deleted. + * @returns A promise that resolves with a unique identifier of a mutation containing the delete changeset. 
+ */ + public deleteByIds(ids: string[]): Promise; + /** + * Get a list of vectors with a matching id. + * @param ids List of vector ids that should be returned. + * @returns A promise that resolves with the raw unscored vectors matching the id set. + */ + public getByIds(ids: string[]): Promise; +} +/** + * The interface for "version_metadata" binding + * providing metadata about the Worker Version using this binding. + */ +type WorkerVersionMetadata = { + /** The ID of the Worker Version using this binding */ + id: string; + /** The tag of the Worker Version using this binding */ + tag: string; + /** The timestamp of when the Worker Version was uploaded */ + timestamp: string; +}; +interface DynamicDispatchLimits { + /** + * Limit CPU time in milliseconds. + */ + cpuMs?: number; + /** + * Limit number of subrequests. + */ + subRequests?: number; +} +interface DynamicDispatchOptions { + /** + * Limit resources of invoked Worker script. + */ + limits?: DynamicDispatchLimits; + /** + * Arguments for outbound Worker script, if configured. + */ + outbound?: { + [key: string]: any; + }; +} +interface DispatchNamespace { + /** + * @param name Name of the Worker script. + * @param args Arguments to Worker script. + * @param options Options for Dynamic Dispatch invocation. + * @returns A Fetcher object that allows you to send requests to the Worker script. + * @throws If the Worker script does not exist in this dispatch namespace, an error will be thrown. + */ + get(name: string, args?: { + [key: string]: any; + }, options?: DynamicDispatchOptions): Fetcher; +} +declare module 'cloudflare:workflows' { + /** + * NonRetryableError allows for a user to throw a fatal error + * that makes a Workflow instance fail immediately without triggering a retry + */ + export class NonRetryableError extends Error { + public constructor(message: string, name?: string); + } +} +declare abstract class Workflow { + /** + * Get a handle to an existing instance of the Workflow. 
+ * @param id Id for the instance of this Workflow + * @returns A promise that resolves with a handle for the Instance + */ + public get(id: string): Promise; + /** + * Create a new instance and return a handle to it. If a provided id exists, an error will be thrown. + * @param options Options when creating an instance including id and params + * @returns A promise that resolves with a handle for the Instance + */ + public create(options?: WorkflowInstanceCreateOptions): Promise; + /** + * Create a batch of instances and return handle for all of them. If a provided id exists, an error will be thrown. + * `createBatch` is limited at 100 instances at a time or when the RPC limit for the batch (1MiB) is reached. + * @param batch List of Options when creating an instance including name and params + * @returns A promise that resolves with a list of handles for the created instances. + */ + public createBatch(batch: WorkflowInstanceCreateOptions[]): Promise; +} +type WorkflowDurationLabel = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'year'; +type WorkflowSleepDuration = `${number} ${WorkflowDurationLabel}${'s' | ''}` | number; +type WorkflowRetentionDuration = WorkflowSleepDuration; +interface WorkflowInstanceCreateOptions { + /** + * An id for your Workflow instance. Must be unique within the Workflow. + */ + id?: string; + /** + * The event payload the Workflow instance is triggered with + */ + params?: PARAMS; + /** + * The retention policy for Workflow instance. + * Defaults to the maximum retention period available for the owner's account. 
+ */ + retention?: { + successRetention?: WorkflowRetentionDuration; + errorRetention?: WorkflowRetentionDuration; + }; +} +type InstanceStatus = { + status: 'queued' // means that instance is waiting to be started (see concurrency limits) + | 'running' | 'paused' | 'errored' | 'terminated' // user terminated the instance while it was running + | 'complete' | 'waiting' // instance is hibernating and waiting for sleep or event to finish + | 'waitingForPause' // instance is finishing the current work to pause + | 'unknown'; + error?: { + name: string; + message: string; + }; + output?: unknown; +}; +interface WorkflowError { + code?: number; + message: string; +} +declare abstract class WorkflowInstance { + public id: string; + /** + * Pause the instance. + */ + public pause(): Promise; + /** + * Resume the instance. If it is already running, an error will be thrown. + */ + public resume(): Promise; + /** + * Terminate the instance. If it is errored, terminated or complete, an error will be thrown. + */ + public terminate(): Promise; + /** + * Restart the instance. + */ + public restart(): Promise; + /** + * Returns the current status of the instance. + */ + public status(): Promise; + /** + * Send an event to this instance. + */ + public sendEvent({ type, payload, }: { + type: string; + payload: unknown; + }): Promise; +} diff --git a/edge-api/wrangler.jsonc b/edge-api/wrangler.jsonc new file mode 100644 index 0000000000..d993d2b502 --- /dev/null +++ b/edge-api/wrangler.jsonc @@ -0,0 +1,39 @@ +/** + * Wrangler config for the edge-api Worker (Lab 17). 
+ * Reference: https://developers.cloudflare.com/workers/wrangler/configuration/ + */ +{ + "$schema": "node_modules/wrangler/config-schema.json", + "name": "edge-api", + "main": "src/index.ts", + "compatibility_date": "2026-05-10", + "compatibility_flags": ["nodejs_compat"], + "upload_source_maps": true, + + // Workers Logs (Task 5 — observability) — keeps the last 24h of console.log() + // output queryable from the dashboard and via `wrangler tail`. + "observability": { + "enabled": true + }, + + // Plaintext variables (Task 4). Visible in the Cloudflare dashboard and bundled + // with the deploy — never put credentials here, use secrets instead. + "vars": { + "APP_NAME": "edge-api", + "COURSE_NAME": "devops-core" + }, + + // Workers KV namespace (Task 4). Replace `` with the id returned by: + // npx wrangler kv namespace create SETTINGS + "kv_namespaces": [ + { + "binding": "SETTINGS", + "id": "d8acf08371ae47c0b0c848b0a0bbf0e2" + } + ] + + // Secrets (Task 4) are NOT declared here — set them at deploy time: + // npx wrangler secret put API_TOKEN + // npx wrangler secret put ADMIN_EMAIL + // Bound to env.API_TOKEN and env.ADMIN_EMAIL inside the Worker. +} diff --git a/k8s/.gitignore b/k8s/.gitignore new file mode 100644 index 0000000000..7eb3192a95 --- /dev/null +++ b/k8s/.gitignore @@ -0,0 +1,2 @@ +tls.key +tls.crt diff --git a/k8s/ARGOCD.md b/k8s/ARGOCD.md new file mode 100644 index 0000000000..040de7de89 --- /dev/null +++ b/k8s/ARGOCD.md @@ -0,0 +1,480 @@ +# ArgoCD GitOps — Lab 13 + +> GitOps continuous deployment for the `devops-info-python` Helm chart from Labs 10–12, managed by ArgoCD. + +## Quick Facts + +- **ArgoCD chart:** `argo/argo-cd` version `7.7.22` (ArgoCD server `v2.13.4`). +- **Cluster:** local minikube (Docker driver), namespace `argocd`. +- **Source repo:** `https://github.com/AEZuraa/DevOps-Core-Course.git`, branch `lab13`. +- **Helm chart:** `k8s/devops-info-python`. 
+- **Applications deployed:** `python-app` (default, manual), `python-app-dev` (dev, auto-sync + selfHeal + prune), `python-app-prod` (prod, manual), plus `python-appset-dev` and `python-appset-prod` generated by an `ApplicationSet`. + +--- + +## 1. ArgoCD Setup + +### 1.1 Install via Helm + +```bash +helm repo add argo https://argoproj.github.io/argo-helm +helm repo update +kubectl create namespace argocd +helm install argocd argo/argo-cd \ + --namespace argocd \ + --version 7.7.22 \ + --set configs.params."server\.insecure"=true +kubectl -n argocd wait --for=condition=ready pod \ + -l app.kubernetes.io/name=argocd-server --timeout=300s +``` + +`server.insecure=true` lets the ArgoCD server accept HTTP from the port-forward, which matches the `argocd login … --plaintext` client flag. Without it, the `argocd` CLI negotiates gRPC over TLS through the port-forward and gets `gRPC connection not ready: context deadline exceeded`. + +### 1.2 Access the UI + +```bash +kubectl port-forward svc/argocd-server -n argocd 8080:80 +``` + +Open `http://localhost:8080`, log in with `admin` and the initial password: + +```bash +kubectl -n argocd get secret argocd-initial-admin-secret \ + -o jsonpath="{.data.password}" | base64 -d +``` + +![ArgoCD login page at localhost:8080](argocd/screenshots/argocd-login.png) + +### 1.3 Install and configure the CLI + +```bash +brew install argocd +argocd login localhost:8080 --plaintext --username admin \ + --password "$(kubectl -n argocd get secret argocd-initial-admin-secret \ + -o jsonpath="{.data.password}" | base64 -d)" +argocd account get-user-info +``` + +--- + +## 2. Application Configuration + +All three manifests live in [`k8s/argocd/`](argocd/). They share the same `repoURL`, `targetRevision: lab13`, and `path: k8s/devops-info-python`. 
+ +| Manifest | App name | Namespace | Values files | Sync policy | +|---|---|---|---|---| +| [`application.yaml`](argocd/application.yaml) | `python-app` | `default` | `values.yaml` | Manual | +| [`application-dev.yaml`](argocd/application-dev.yaml) | `python-app-dev` | `dev` | `values.yaml`, `values-dev.yaml` | `automated` + `prune` + `selfHeal` | +| [`application-prod.yaml`](argocd/application-prod.yaml) | `python-app-prod` | `prod` | `values.yaml`, `values-prod.yaml` | Manual | + +Common sync option: `CreateNamespace=true` so ArgoCD creates `dev` / `prod` on first sync. + +### Values per environment + +| | default | dev | prod | +|---|---|---|---| +| `replicaCount` | 3 | 2 *(bumped from 1 to exercise the GitOps workflow in §5)* | 5 | +| `resources.requests` | 100m / 128Mi | 50m / 64Mi | 200m / 256Mi | +| `resources.limits` | 200m / 256Mi | 100m / 128Mi | 500m / 512Mi | +| `service.type` | NodePort | NodePort | LoadBalancer | +| `service.nodePort` | 30080 | 30081 | — | + +Dev uses nodePort `30081` (bumped from `30080`) so `python-app` and `python-app-dev` can coexist on the same minikube node. 
+ +### Deploy and sync + +```bash +kubectl apply -f k8s/argocd/application.yaml \ + -f k8s/argocd/application-dev.yaml \ + -f k8s/argocd/application-prod.yaml + +argocd app sync python-app # manual: default +argocd app sync python-app-prod # manual: prod +# python-app-dev syncs automatically +``` + +Local image build (minikube can't pull `linux/arm64` from Docker Hub): + +```bash +docker build -t aezuraa/devops-info-service:python app_python/ +minikube image load aezuraa/devops-info-service:python +``` + +All five Applications (three individual + two from the ApplicationSet in §6) visible in the UI: + +![ArgoCD Applications overview — all 5 apps Synced/Healthy](argocd/screenshots/argocd-apps-overview.png) + +From the CLI: + +``` +$ argocd app list +NAME NAMESPACE PROJECT STATUS HEALTH SYNCPOLICY TARGET +argocd/python-app default default Synced Healthy Manual lab13 +argocd/python-app-dev dev default Synced Healthy Auto-Prune lab13 +argocd/python-app-prod prod default Synced Progressing Manual lab13 +argocd/python-appset-dev appset-dev default Synced Healthy Auto-Prune lab13 +argocd/python-appset-prod appset-prod default Synced Healthy Manual lab13 +``` + +All five Applications point to `repoURL: https://github.com/AEZuraa/DevOps-Core-Course.git`, `path: k8s/devops-info-python`. `python-app-prod` shows `Progressing` only because its `LoadBalancer` Service waits for an external IP in minikube without `minikube tunnel` — the Deployment itself is `Synced / Healthy`. + +--- + +## 3. Multi-Environment + +### Sync-policy rationale + +| Stage | Policy | Why | +|---|---|---| +| **dev** | `automated` + `prune` + `selfHeal` | Fast feedback. Every push lands within the 3-minute reconcile interval, manual `kubectl` changes get reverted so the cluster matches Git, deleted objects are pruned. | +| **prod** | Manual | Human review of the diff before release. Change is staged in Git, an operator runs `argocd app sync python-app-prod` after approval. Rollback = `git revert` + sync. 
| + +### Namespace separation + +- `dev` — single replica, tight resource requests, NodePort 30081. +- `prod` — five replicas, bigger requests, LoadBalancer (pending external IP in plain minikube; use `minikube tunnel` to expose). + +Resources live in distinct namespaces, so secrets / configmaps / PVCs do not collide. + +### UI — resource tree per environment + +Dev (auto-sync, 2 replicas): + +![python-app-dev resource tree](argocd/screenshots/argocd-python-app-dev-tree.png) + +Prod (manual sync, 5 replicas): + +![python-app-prod resource tree](argocd/screenshots/argocd-python-app-prod-sync.png) + +### Pod inventory per environment + +``` +$ kubectl get pods -n default +NAME READY STATUS RESTARTS AGE +python-app-devops-info-python-67f5458c7c-6qbxh 1/1 Running 0 53m +python-app-devops-info-python-67f5458c7c-l5wqv 1/1 Running 0 53m +python-app-devops-info-python-67f5458c7c-qzjsf 1/1 Running 0 53m + +$ kubectl get pods -n dev +NAME READY STATUS RESTARTS AGE +python-app-dev-devops-info-python-855bd58b5-2kwxt 1/1 Running 0 64m +python-app-dev-devops-info-python-855bd58b5-hx9bk 1/1 Running 0 9m17s + +$ kubectl get pods -n prod +NAME READY STATUS RESTARTS AGE +python-app-prod-devops-info-python-699f869975-bxcwj 1/1 Running 0 51m +python-app-prod-devops-info-python-699f869975-q476z 1/1 Running 0 51m +python-app-prod-devops-info-python-699f869975-rwrzw 1/1 Running 0 51m +python-app-prod-devops-info-python-699f869975-w7r2t 1/1 Running 0 51m +python-app-prod-devops-info-python-699f869975-xncsf 1/1 Running 0 51m +python-app-prod-devops-info-python-pre-install-xbqxv 0/1 Completed 0 51m +``` + +`default`, `dev`, and `prod` have 3 / 2 / 5 running pods — matching `replicaCount` in `values.yaml`, `values-dev.yaml` (bumped from 1 to 2 during the GitOps test in §5), and `values-prod.yaml` respectively. The `pre-install` Completed pod in `prod` is a finished Helm `PreSync` hook Job. 
+ +### ArgoCD `app get` per environment + +``` +$ argocd app get python-app-dev +Name: argocd/python-app-dev +Namespace: dev +Source: +- Repo: https://github.com/AEZuraa/DevOps-Core-Course.git + Target: lab13 + Path: k8s/devops-info-python + Helm Values: values.yaml,values-dev.yaml +Sync Policy: Automated (Prune) +Sync Status: Synced to lab13 (ccac837) +Health Status: Healthy + +GROUP KIND NAMESPACE NAME STATUS HEALTH +apps Deployment dev python-app-dev-devops-info-python Synced Healthy + ConfigMap dev python-app-dev-devops-info-python-config Synced + ConfigMap dev python-app-dev-devops-info-python-env Synced + PersistentVolumeClaim dev python-app-dev-devops-info-python-data Synced Healthy + Secret dev python-app-dev-devops-info-python-secret Synced + Service dev python-app-dev-devops-info-python Synced Healthy + ServiceAccount dev python-app-dev-devops-info-python Synced +``` + +``` +$ argocd app get python-app-prod +Name: argocd/python-app-prod +Namespace: prod +Source: +- Repo: https://github.com/AEZuraa/DevOps-Core-Course.git + Target: lab13 + Path: k8s/devops-info-python + Helm Values: values.yaml,values-prod.yaml +Sync Policy: Manual +Sync Status: Synced to lab13 (ccac837) ← prod renders the same output at ccac837 (only values-dev.yaml changed) +Health Status: Progressing + +GROUP KIND NAMESPACE NAME STATUS HEALTH HOOK +batch Job prod python-app-prod-devops-info-python-pre-install Succeeded PreSync + ServiceAccount prod python-app-prod-devops-info-python Synced + Secret prod python-app-prod-devops-info-python-secret Synced + ConfigMap prod python-app-prod-devops-info-python-config Synced + ConfigMap prod python-app-prod-devops-info-python-env Synced + PersistentVolumeClaim prod python-app-prod-devops-info-python-data Synced Healthy + Service prod python-app-prod-devops-info-python Synced Progressing +apps Deployment prod python-app-prod-devops-info-python Synced Healthy +``` + +Dev has `Sync Policy: Automated (Prune)` with `selfHeal` enabled; Prod stays 
`Manual`. + +### Deployment workflow + +``` + Git (branch: lab13) + │ + ┌───────────────┴───────────────┐ + │ │ + reconcile reconcile + every 3m every 3m + │ │ + ▼ ▼ + python-app-dev python-app-prod + auto-sync ✓ manual sync + selfHeal ✓ (operator: argocd app sync) + prune ✓ + │ │ + ▼ ▼ + namespace: dev namespace: prod + replicas: 2 replicas: 5 +``` + +--- + +## 4. Self-Healing Evidence + +All tests were run against `python-app-dev` (dev, auto-sync + selfHeal). + +### 4.1 Manual scale test + +``` +$ date -u +%FT%TZ +2026-04-17T16:21:29Z +$ kubectl get deployment python-app-dev-devops-info-python -n dev -o jsonpath='{.spec.replicas}' +1 +$ kubectl scale deployment python-app-dev-devops-info-python -n dev --replicas=5 +deployment.apps/python-app-dev-devops-info-python scaled +# pods observed: 1 Running + 4 ContainerCreating/Pending +# poll every 3s: +2026-04-17T16:21:37Z - replicas: 1 ← ArgoCD reverted in under 10 s +``` + +ArgoCD detected that `.spec.replicas=5` diverged from Git (`values-dev.yaml: replicaCount=1`) and patched the deployment back within the next reconcile (selfHeal runs right after detection). + +### 4.2 Pod deletion test + +``` +$ date -u +%FT%TZ +2026-04-17T16:21:59Z +$ kubectl delete pod -n dev python-app-dev-devops-info-python-855bd58b5-dfffv +pod "…dfffv" deleted from dev namespace +# 0 s later +python-app-dev-devops-info-python-855bd58b5-2kwxt 1/1 Running 31s +``` + +A replacement pod appeared immediately — but this is **Kubernetes**, not ArgoCD. The Deployment controller reconciles its ReplicaSet against the desired replica count. ArgoCD does not see drift here because the Deployment spec never changed; only the pod set did, and pods are owned by the ReplicaSet. 
+ +### 4.3 Configuration drift test (container image) + +``` +$ date -u +%FT%TZ +2026-04-17T16:23:20Z +$ kubectl get deployment python-app-dev-devops-info-python -n dev \ + -o jsonpath='{.spec.template.spec.containers[0].image}' +aezuraa/devops-info-service:python +$ kubectl set image deployment/python-app-dev-devops-info-python -n dev \ + devops-info-python=aezuraa/devops-info-service:python-drift +deployment.apps/python-app-dev-devops-info-python image updated +# poll every 3s: +2026-04-17T16:23:23Z - image: aezuraa/devops-info-service:python ← self-healed in ~3 s +``` + +`.spec.template.spec.containers[0].image` is a field rendered by the chart, so ArgoCD tracks it and reverts the change. + +![ArgoCD diff view — drifted image tag vs Git](argocd/screenshots/argocd-diff-view.png) + +![Sync / selfHeal event on python-app-dev](argocd/screenshots/argocd-self-heal-event.png) + +Note: a drift that is **additive and ignored** by the managed-fields owner (e.g. `kubectl label deployment … drift-test=manual`) is *not* treated as drift. ArgoCD compares its own rendered manifest to the live object via strategic merge; extra labels added outside the controlled set are merged, not reverted. This is [documented behavior](https://argo-cd.readthedocs.io/en/stable/user-guide/diffing/). + +### 4.4 K8s self-healing vs ArgoCD self-healing + +| | Kubernetes | ArgoCD | +|---|---|---| +| **Scope** | pods / replicas (RS, Deployment) | any resource ArgoCD tracks | +| **Trigger** | `ownerReferences` + controller reconcile | poll Git (default 3 min) + webhook + explicit `refresh` | +| **Example fix** | pod dies → RS creates a new one | `kubectl set image` → ArgoCD re-applies rendered manifest | +| **Source of truth** | the resource's `.spec` in etcd | Git (rendered Helm output) | + +### 4.5 What triggers an ArgoCD sync? + +1. **Reconcile timer** — default `timeout.reconciliation=3m` (polls repo, re-renders, compares). +2. 
**Explicit refresh** — `argocd app refresh <app-name>` or the UI "Refresh" button forces an immediate diff.
+3. **Webhook** — a GitHub/GitLab webhook hitting `/api/webhook` fires an instant sync (configured on the `argocd-server` service).
+4. **Manual sync** — `argocd app sync <app-name>` applies the diff immediately, regardless of drift.
+5. **Self-heal** — with `automated.selfHeal=true`, any detected drift kicks off a sync on the next reconcile.
+
+---
+
+## 5. GitOps Workflow Test
+
+The full round-trip with `python-app-dev` (auto-sync enabled):
+
+```bash
+# 1. Make a change locally
+sed -i '' 's/^replicaCount: 1$/replicaCount: 2/' \
+  k8s/devops-info-python/values-dev.yaml
+
+# 2. Commit + push
+git add k8s/devops-info-python/values-dev.yaml
+git commit -m "chore(dev): bump dev replicas to 2"
+git push origin lab13
+
+# 3. Wait for reconcile OR force refresh (hard-refresh busts the repo-server cache)
+argocd app get python-app-dev --hard-refresh
+
+# 4. Observe
+argocd app get python-app-dev | grep -E "Sync Status|Revision"
+kubectl get deployment python-app-dev-devops-info-python -n dev \
+  -o jsonpath='{.spec.replicas}'; echo
+```
+
+### Live run
+
+Starting point: dev was synced to the previous commit `e11dc11` with 1 replica. 
After pushing `ccac837` (the commit that flips `replicaCount: 1 → 2` in `values-dev.yaml`): + +``` +$ argocd app get python-app-dev | grep -E "Sync Status|Health" +Sync Status: Synced to lab13 (e11dc11) ← repo-server cache, Git already has ccac837 +Health Status: Healthy + +$ argocd app get python-app-dev --hard-refresh | grep -E "Sync Status|Health" +Sync Status: OutOfSync from lab13 (ccac837) ← drift detected +Health Status: Healthy + +# automated sync kicks in ~13 s later +2026-04-17T17:17:37Z | Synced to lab13 (ccac837) | replicas=2 pods=1 +2026-04-17T17:17:42Z | Synced to lab13 (ccac837) | replicas=2 pods=2 + +$ argocd app history python-app-dev +ID DATE REVISION +0 2026-04-17 19:19:25 +0300 MSK lab13 (e11dc11) ← initial sync +1 2026-04-17 19:21:04 +0300 MSK lab13 (e11dc11) ← re-sync after Application spec edit +2 2026-04-17 20:17:59 +0300 MSK lab13 (ccac837) ← GitOps sync triggered by push +``` + +End state — two pods in dev, both from the new ReplicaSet: + +``` +$ kubectl get pods -n dev +NAME READY STATUS RESTARTS AGE +python-app-dev-devops-info-python-855bd58b5-2kwxt 1/1 Running 0 64m +python-app-dev-devops-info-python-855bd58b5-hx9bk 1/1 Running 0 9m17s +``` + +Why `--hard-refresh` was needed: `argocd app refresh` only re-runs the diff against the cached checkout; `--hard-refresh` additionally forces the repo-server to re-clone and pick up the new commit. In production a GitHub webhook on `/api/webhook` makes this instant, and the 3-minute reconcile timer would pick it up anyway within `timeout.reconciliation`. + +For `python-app-prod` (manual) the same push would produce `Sync Status: OutOfSync` until an operator runs `argocd app sync python-app-prod` — only if the push changes something rendered by `values.yaml` or `values-prod.yaml`. The `ccac837` commit only touched `values-dev.yaml`, so prod renders identically and stays `Synced` after the hard-refresh. To force a manual-sync demo, change a field used by prod (e.g. 
bump `replicaCount` in `values-prod.yaml`) and observe `python-app-prod` go OutOfSync. + +--- + +## 6. ApplicationSet (Bonus) + +[`k8s/argocd/applicationset.yaml`](argocd/applicationset.yaml) uses the **List generator** to render two Applications from one template. + +### Why ApplicationSet over individual Applications + +| | Individual `Application` | `ApplicationSet` | +|---|---|---| +| Lines of YAML | 3 files × ~25 lines | 1 file × ~50 lines | +| Adding a new env | copy-paste a new file | add one list element | +| Per-env overrides | full spec duplicated | element parameters + `templatePatch` | +| Scaling to N envs / clusters | O(N) edits | O(1) template | + +### Generator types (when to use which) + +- **List** — small, explicit env set (what we use here: dev / prod). +- **Cluster** — one app per registered cluster (fan-out a monitoring stack across staging/prod clusters). +- **Git Directory** — monorepo where each subdirectory is a chart; the generator watches `k8s/*` and creates an app per folder. +- **Git Files** — each file matching a glob (e.g. `environments/*.yaml`) produces one Application with that file's parameters. +- **Matrix** — Cartesian product of two generators (e.g. Clusters × Git Files). +- **Merge** — element-wise merge (e.g. Cluster generator + per-cluster overrides file). + +### Per-env divergence + +`templatePatch` uses Go templating to inject the `automated` sync policy only for `env == dev`: + +```yaml +templatePatch: | + {{- if eq .env "dev" }} + spec: + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + {{- end }} +``` + +Helm parameter overrides force `service.type=ClusterIP` (and clear `nodePort`) so the generated apps can coexist with `python-app-dev`/`python-app-prod` on the same cluster without port collisions. 
+ +### Result + +``` +$ argocd appset list +NAME PROJECT SYNCPOLICY CONDITIONS +argocd/python-app-set default nil [{ParametersGenerated Successfully generated parameters for all Applications … True ParametersGenerated} + {ResourcesUpToDate ApplicationSet up to date … True ApplicationSetUpToDate}] +``` + +``` +$ argocd app list | awk 'NR==1 || /appset/' +NAME NAMESPACE SYNCPOLICY STATUS HEALTH +argocd/python-appset-dev appset-dev Auto-Prune Synced Healthy +argocd/python-appset-prod appset-prod Manual Synced Healthy +``` + +Matching pod inventory: + +``` +$ kubectl get pods -n appset-dev +NAME READY STATUS AGE +python-appset-dev-devops-info-python-5c6bb8fcc8-h6mts 1/1 Running 42m + +$ kubectl get pods -n appset-prod +NAME READY STATUS AGE +python-appset-prod-devops-info-python-66d8d7d66f-8z5kn 1/1 Running 44m +python-appset-prod-devops-info-python-66d8d7d66f-bvp8h 1/1 Running 44m +python-appset-prod-devops-info-python-66d8d7d66f-fs6km 1/1 Running 44m +python-appset-prod-devops-info-python-66d8d7d66f-hxvtl 1/1 Running 44m +python-appset-prod-devops-info-python-66d8d7d66f-wmzk6 1/1 Running 44m +``` + +Deleting `applicationset.yaml` cascades-deletes both generated Applications (and their resources, thanks to `resources-finalizer.argocd.argoproj.io` inherited via the template). + +--- + +## 7. 
Verification Commands + +```bash +# Status +argocd app list +argocd appset list +kubectl get pods -A | grep -E 'argocd|default|dev|prod|appset' + +# Dev replicas (2 from values-dev.yaml) +kubectl get deploy -n dev -o jsonpath='{.items[0].spec.replicas}'; echo + +# Prod replicas (5 from values-prod.yaml) +kubectl get deploy -n prod -o jsonpath='{.items[0].spec.replicas}'; echo + +# Force reconcile and print sync status +argocd app refresh python-app-dev +argocd app get python-app-dev | grep -E 'Sync Status|Health Status' +``` diff --git a/k8s/CONFIGMAPS.md b/k8s/CONFIGMAPS.md new file mode 100644 index 0000000000..a2d2ef49ac --- /dev/null +++ b/k8s/CONFIGMAPS.md @@ -0,0 +1,274 @@ +# ConfigMaps & Persistent Volumes + +## 1. Application Changes + +### Visits Counter + +Both apps (Python and Go) now have a file-backed visit counter: +- Each `GET /` request increments a counter stored in `/data/visits` +- New `GET /visits` endpoint returns the current count +- Thread-safe: uses `threading.Lock` (Python) / `sync.Mutex` (Go) +- Atomic writes via temp file + rename to prevent corruption + +### New Endpoint — `/visits` + +``` +GET /visits → { "visits": 42 } +``` + +### Local Testing with Docker + +Docker Compose volumes configured in `monitoring/docker-compose.yml`: + +```yaml +app-python: + volumes: + - app-python-data:/data + +app-go: + volumes: + - app-go-data:/data +``` + +**Test procedure:** +1. `docker compose up -d app-python` +2. `curl http://localhost:8000/` (repeat several times) +3. `curl http://localhost:8000/visits` — verify counter +4. `docker compose restart app-python` +5. `curl http://localhost:8000/visits` — counter preserved + +![Docker Compose: visits counter before and after container restart](docs/screenshots/docker-visits-test.png) + +--- + +## 2. 
ConfigMap Implementation
+
+### ConfigMap Template Structure
+
+Each chart has `templates/configmap.yaml` with **two ConfigMaps**:
+
+**1) File-based ConfigMap** (`*-config`) — mounts `config.json` as a file:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: <release>-<chart>-config
+data:
+  config.json: |-
+    <rendered config.json content>
+```
+
+**2) Env-var ConfigMap** (`*-env`) — injects key-value pairs as environment variables:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: <release>-<chart>-env
+data:
+  APP_ENV: "dev"
+  LOG_LEVEL: "INFO"
+  APP_NAME: "devops-info-python"
+```
+
+### `config.json` Content
+
+```json
+{
+  "app_name": "devops-info-python",
+  "environment": "dev",
+  "version": "1.0.0",
+  "features": {
+    "metrics_enabled": true,
+    "debug_mode": false,
+    "visits_tracking": true
+  },
+  "logging": {
+    "level": "INFO",
+    "format": "json"
+  }
+}
+```
+
+### How ConfigMap Is Mounted as File
+
+In `deployment.yaml`:
+
+```yaml
+volumes:
+  - name: config-volume
+    configMap:
+      name: <fullname>-config
+
+containers:
+  - volumeMounts:
+      - name: config-volume
+        mountPath: /config
+```
+
+The file becomes available at `/config/config.json` inside the pod.
+
+### How ConfigMap Provides Environment Variables
+
+```yaml
+envFrom:
+  - configMapRef:
+      name: <fullname>-env
+```
+
+This injects `APP_ENV`, `LOG_LEVEL`, `APP_NAME` as environment variables.
+
+### Verification
+
+```bash
+# 1. List ConfigMaps and PVCs
+kubectl get configmap,pvc
+
+# 2. Verify config file inside pod
+kubectl exec <pod-name> -- cat /config/config.json
+
+# 3. Verify environment variables
+kubectl exec <pod-name> -- printenv | grep -E 'APP_|LOG_'
+```
+
+![kubectl get configmap,pvc](docs/screenshots/configmap-pvc-list.png)
+
+![cat /config/config.json inside the pod](docs/screenshots/config-json-inside-pod.png)
+
+![APP_* and LOG_* environment variables inside the pod](docs/screenshots/env-vars-inside-pod.png)
+
+---
+
+## 3. 
Persistent Volume
+
+### PVC Configuration
+
+`templates/pvc.yaml`:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: <fullname>-data
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 100Mi
+```
+
+### Access Modes and Storage Class
+
+| Access Mode | Description |
+|---|---|
+| `ReadWriteOnce` (RWO) | Volume can be mounted as read-write by a single node |
+| `ReadOnlyMany` (ROX) | Volume can be mounted as read-only by many nodes |
+| `ReadWriteMany` (RWX) | Volume can be mounted as read-write by many nodes |
+
+We use `ReadWriteOnce` — sufficient for a single-replica deployment writing visit counts.
+
+**Storage class** is left empty (`""`) to use the cluster default. In Minikube this is `standard` (hostPath provisioner), which dynamically provisions PVs.
+
+### Volume Mount Configuration
+
+In `deployment.yaml`:
+
+```yaml
+volumes:
+  - name: data-volume
+    persistentVolumeClaim:
+      claimName: <fullname>-data
+
+containers:
+  - volumeMounts:
+      - name: data-volume
+        mountPath: /data
+```
+
+The app writes visit counts to `/data/visits`, which lives on the PVC.
+
+### Persistence Test Evidence
+
+**Test procedure:**
+1. Deploy: `helm upgrade --install devops-python ./devops-info-python --set secrets.DB_USERNAME=admin --set secrets.DB_PASSWORD=S3cur3P@ssw0rd`
+2. Access root endpoint: `curl http://<minikube-ip>:30080/` (several times)
+3. Check counter: `curl http://<minikube-ip>:30080/visits`
+4. Delete pod: `kubectl delete pod <pod-name>`
+5. Wait for new pod: `kubectl get pods -w`
+6. Check counter again: `curl http://<minikube-ip>:30080/visits` — same value!
+
+![Visits count before pod deletion](docs/screenshots/visits-before-delete.png)
+
+![kubectl delete pod and new pod coming up](docs/screenshots/pod-delete.png)
+
+![Visits count after new pod starts (unchanged)](docs/screenshots/visits-after-delete.png)
+
+---
+
+## 4. 
ConfigMap vs Secret + +| Aspect | ConfigMap | Secret | +|---|---|---| +| **Purpose** | Non-sensitive configuration | Sensitive data (passwords, tokens, keys) | +| **Encoding** | Plain text | Base64-encoded (not encrypted by default) | +| **Use cases** | App settings, feature flags, config files | DB passwords, API keys, TLS certificates | +| **Size limit** | 1 MiB | 1 MiB | +| **RBAC** | Standard access | Can be restricted with stricter RBAC policies | +| **etcd storage** | Plain | Can be encrypted at rest with EncryptionConfiguration | + +**When to use ConfigMap:** +- Application configuration files (`config.json`, `.env`) +- Feature flags, log levels, environment names +- Non-sensitive key-value settings + +**When to use Secret:** +- Database credentials +- API keys, OAuth tokens +- TLS certificates and private keys +- Any data that should not appear in logs or version control + +--- + +## 5. Bonus — ConfigMap Hot Reload + +### Update Delay (Kubelet Sync Period) + +When a ConfigMap is updated, mounted volumes are eventually updated by the kubelet. Default delay: **up to 60 seconds + cache TTL** (configurable via `--sync-frequency`). Total propagation can take 1–2 minutes. + +### `subPath` Limitation + +When mounting with `subPath`, the file is a **copy**, not a symlink. It will **not** receive automatic updates when the ConfigMap changes. Only full directory mounts (without `subPath`) get auto-updated via symlink rotation. + +**Use `subPath`** when you need to mount a single file into a directory without hiding other files. +**Avoid `subPath`** when you need automatic ConfigMap updates. + +### Chosen Approach — Checksum Annotation + +We use the **checksum annotation pattern** in the deployment template: + +```yaml +metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +``` + +**How it works:** +1. The annotation value is a SHA-256 hash of the rendered ConfigMap template +2. 
When ConfigMap content changes, the hash changes +3. This makes the pod template different, triggering a rolling update +4. `helm upgrade` detects the change and restarts pods automatically + +**Demonstration:** +1. Change `config.json` (e.g., set `"environment": "prod"`) +2. Run `helm upgrade --install devops-python ./devops-info-python --set secrets.DB_USERNAME=admin --set secrets.DB_PASSWORD=S3cur3P@ssw0rd` +3. Observe pods being recreated with new configuration + +![helm upgrade after ConfigMap change; pod rolling restart](docs/screenshots/configmap-hot-reload.png) + +### Alternative Approaches + +- **Stakater Reloader** — controller that watches ConfigMaps/Secrets and triggers rolling updates +- **Application file watching** — inotify/fsnotify to detect file changes and reload config +- **Manual `kubectl rollout restart`** — simple but not automated diff --git a/k8s/HELM.md b/k8s/HELM.md new file mode 100644 index 0000000000..1761143ef5 --- /dev/null +++ b/k8s/HELM.md @@ -0,0 +1,255 @@ +# Lab 10 — Helm Package Manager + +## 1. 
Chart Overview + +### Chart Structure + +``` +k8s/ +├── common-lib/ # Library chart (shared templates) +│ ├── Chart.yaml +│ └── templates/ +│ └── _helpers.tpl # Common name/label/selector helpers +├── devops-info-python/ # Python app chart +│ ├── Chart.yaml +│ ├── values.yaml # Default values +│ ├── values-dev.yaml # Dev environment overrides +│ ├── values-prod.yaml # Prod environment overrides +│ ├── charts/ # Dependency archives (auto-generated) +│ └── templates/ +│ ├── _helpers.tpl # Re-exports common-lib helpers +│ ├── deployment.yaml +│ ├── service.yaml +│ ├── NOTES.txt +│ └── hooks/ +│ ├── pre-install-job.yaml +│ └── post-install-job.yaml +└── devops-info-go/ # Go app chart + ├── Chart.yaml + ├── values.yaml + ├── charts/ + └── templates/ + ├── _helpers.tpl + ├── deployment.yaml + ├── service.yaml + └── NOTES.txt +``` + +### Key Template Files + +| File | Purpose | +|------|---------| +| `_helpers.tpl` | Name generation, labels, selector labels (DRY via common-lib) | +| `deployment.yaml` | Templated Deployment with configurable replicas, image, resources, probes | +| `service.yaml` | Templated Service with configurable type (NodePort/ClusterIP/LoadBalancer) | +| `hooks/pre-install-job.yaml` | Pre-install validation Job | +| `hooks/post-install-job.yaml` | Post-install smoke test Job | +| `NOTES.txt` | Dynamic post-install instructions | + +### Values Organization + +Values are structured hierarchically: +- `image.*` — repository, tag, pullPolicy +- `service.*` — type, port, targetPort, nodePort +- `resources.*` — CPU/memory requests and limits +- `livenessProbe.*` / `readinessProbe.*` — health check configuration +- `strategy.*` — deployment strategy +- `env` — environment variables list + +--- + +## 2. 
Configuration Guide + +### Important Values + +| Value | Default | Description | +|-------|---------|-------------| +| `replicaCount` | 3 | Number of pod replicas | +| `image.repository` | `aezuraa/devops-info-service` | Docker image | +| `image.tag` | `python` | Image tag | +| `image.pullPolicy` | `IfNotPresent` | Pull policy | +| `service.type` | `NodePort` | Service type | +| `service.port` | 80 | Service port | +| `service.targetPort` | 8080 | Container port | +| `resources.limits.cpu` | `200m` | CPU limit | +| `resources.limits.memory` | `256Mi` | Memory limit | +| `livenessProbe.initialDelaySeconds` | 10 | Liveness probe delay | +| `readinessProbe.initialDelaySeconds` | 5 | Readiness probe delay | + +### Environment Customization + +**Dev** (`values-dev.yaml`): 1 replica, relaxed resources (64Mi/50m), relaxed probe thresholds, NodePort. + +**Prod** (`values-prod.yaml`): 5 replicas, higher resources (512Mi/500m), strict probes, LoadBalancer. + +### Example Installations + +```bash +# Dev environment +helm install python-dev k8s/devops-info-python -f k8s/devops-info-python/values-dev.yaml + +# Prod environment +helm install python-prod k8s/devops-info-python -f k8s/devops-info-python/values-prod.yaml + +# Override specific value +helm install python-custom k8s/devops-info-python --set replicaCount=10 +``` + +--- + +## 3. Hook Implementation + +### Hooks + +| Hook | Type | Weight | Purpose | +|------|------|--------|---------| +| `pre-install-job.yaml` | `pre-install` | `-5` | Environment validation before deployment | +| `post-install-job.yaml` | `post-install` | `5` | Smoke test after deployment | + +### Execution Order + +1. Pre-install hook (weight -5) runs first — validates environment readiness +2. Main resources (Deployment, Service) are created +3. Post-install hook (weight 5) runs after — performs smoke test + +### Deletion Policies + +Both hooks use `hook-succeeded` — Jobs are automatically deleted after successful completion. 
This keeps the cluster clean; only failed hook Jobs remain for debugging. + +--- + +## 4. Installation Evidence + +### Helm Version + +``` +$ helm version +version.BuildInfo{Version:"v4.1.3", ...} +``` + +### helm list + +![helm list](docs/screenshots/helm_list.png) + +### kubectl get all + +![kubectl get all](docs/screenshots/kubectl_get_all_2.png) + +### Hook Execution + +Hooks execute during `helm install` and are deleted per `hook-succeeded` policy: + +![hooks](docs/screenshots/hooks_executed.png) + +### Dev vs Prod Deployments + +**Dev** (1 replica, NodePort): +![dev deployment](docs/screenshots/dev_deployment.png) + +**Prod** (5 replicas, LoadBalancer): +![prod deployment](docs/screenshots/prod_deployment.png) + +--- + +## 5. Operations + +### Install + +```bash +helm dependency update k8s/devops-info-python +helm install python-dev k8s/devops-info-python -f k8s/devops-info-python/values-dev.yaml +``` + +### Upgrade + +```bash +helm upgrade python-dev k8s/devops-info-python -f k8s/devops-info-python/values-prod.yaml +``` + +### Rollback + +```bash +helm history python-dev +helm rollback python-dev 1 +``` + +### Uninstall + +```bash +helm uninstall python-dev +``` + +--- + +## 6. Testing & Validation + +### helm lint + +``` +$ helm lint k8s/devops-info-python +==> Linting k8s/devops-info-python +[INFO] Chart.yaml: icon is recommended +1 chart(s) linted, 0 chart(s) failed +``` + +### helm template + +```bash +helm template test-release k8s/devops-info-python +``` + +Renders all templates locally without connecting to the cluster. Verified: Deployment, Service, and hook Jobs render correctly with proper labels, values substitution, and annotations. + +![helm template](docs/screenshots/helm_template.png) + +### Dry-run + +```bash +helm install --dry-run --debug test-release k8s/devops-info-python +``` + + +![helm dry run](docs/screenshots/helm_dry_run.png) + +### Application Accessibility + +![app accessible](docs/screenshots/app_accessible.png) + +--- + +## 7. 
Library Chart (Bonus) + +### Structure + +`k8s/common-lib/` — library chart (`type: library` in Chart.yaml), cannot be installed directly. + +Contains shared templates in `_helpers.tpl`: +- `common.name` — chart name generation +- `common.fullname` — release-qualified name generation +- `common.chart` — chart name + version string +- `common.labels` — standard Kubernetes labels (chart, name, instance, version, managed-by) +- `common.selectorLabels` — selector labels (name, instance) + +### Usage in App Charts + +Both `devops-info-python` and `devops-info-go` declare the dependency: + +```yaml +dependencies: + - name: common-lib + version: 0.1.0 + repository: "file://../common-lib" +``` + +Templates reference `common.*` helpers directly, eliminating duplication. + +### Benefits + +- **DRY**: Label/name logic defined once, used everywhere +- **Consistency**: All apps get identical labeling standards +- **Maintainability**: Change label format in one place, all charts update +- **Scalability**: New app charts just add the dependency + +### Both Apps Deployed + +![both apps](docs/screenshots/both_apps_deployed.png) diff --git a/k8s/MONITORING.md b/k8s/MONITORING.md new file mode 100644 index 0000000000..07af93de4e --- /dev/null +++ b/k8s/MONITORING.md @@ -0,0 +1,529 @@ +# Lab 16 — Kubernetes Monitoring & Init Containers + +End-to-end observability with the **kube-prometheus-stack** Helm chart and two +init container patterns layered on top of the existing +[`devops-info-python`](devops-info-python/) chart. Bonus task adds a custom +`/metrics` endpoint plus a `ServiceMonitor` so the application is scraped by +the Prometheus Operator. + +--- + +## 1. Stack Components + +The `prometheus-community/kube-prometheus-stack` chart bundles every piece +needed for cluster-grade monitoring. 
Roles in this lab: + +| Component | Role | +|---|---| +| **Prometheus Operator** | Kubernetes controller that watches `Prometheus`, `Alertmanager`, `ServiceMonitor`, `PrometheusRule` CRDs and reconciles them into Prometheus / Alertmanager StatefulSets and scrape configs. Lets us declare scrape targets as Kubernetes objects instead of editing `prometheus.yml`. | +| **Prometheus** | Time-series database + scraper. Pulls `/metrics` from every selected target, stores samples in a TSDB, evaluates recording / alerting rules. Exposes the PromQL query API on port 9090. | +| **Alertmanager** | Routes, deduplicates and silences alerts forwarded by Prometheus. Sends notifications to receivers (email/Slack/etc.). UI on port 9093. | +| **Grafana** | Dashboard front-end. Connects to Prometheus as a data source and ships ~30 pre-built dashboards covering pods, namespaces, nodes, kubelet, API server, networking. | +| **kube-state-metrics** | Exporter that turns Kubernetes object state (deployments, pods, replicasets, …) into Prometheus metrics. Source of truth for "how many pods are Pending in namespace X". | +| **node-exporter** | DaemonSet — one pod per node — exposing OS-level metrics: CPU, memory, disk, filesystem, network. Source of `node_*` metrics. | + +Together: Prometheus scrapes node-exporter + kube-state-metrics + Operator-provided +targets → stores metrics → Grafana visualizes → Alertmanager routes alerts triggered +by `PrometheusRule` evaluations. + +--- + +## 2. Installation + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update + +helm install monitoring prometheus-community/kube-prometheus-stack \ + --namespace monitoring \ + --create-namespace +``` + +Chart version installed: `kube-prometheus-stack-84.5.0`, app version `v0.90.1`. 
+ +### 2.1 Resource verification + +```bash +kubectl get po,svc -n monitoring +``` + +```text +$ kubectl get po,svc -n monitoring +NAME READY STATUS RESTARTS AGE +pod/alertmanager-monitoring-kube-prometheus-alertmanager-0 2/2 Running 0 17m +pod/monitoring-grafana-f8c748584-6k2gk 3/3 Running 0 6m36s +pod/monitoring-kube-prometheus-operator-54f68d65b4-tbjcq 1/1 Running 0 19m +pod/monitoring-kube-state-metrics-5957bd45bc-skg6b 1/1 Running 0 19m +pod/monitoring-prometheus-node-exporter-fvmx6 1/1 Running 0 19m +pod/prometheus-monitoring-kube-prometheus-prometheus-0 2/2 Running 0 17m + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/alertmanager-operated ClusterIP None 9093/TCP,9094/TCP,9094/UDP 17m +service/monitoring-grafana ClusterIP 10.109.96.111 80/TCP 19m +service/monitoring-kube-prometheus-alertmanager ClusterIP 10.101.103.188 9093/TCP,8080/TCP 19m +service/monitoring-kube-prometheus-operator ClusterIP 10.105.173.79 443/TCP 19m +service/monitoring-kube-prometheus-prometheus ClusterIP 10.111.116.42 9090/TCP,8080/TCP 19m +service/monitoring-kube-state-metrics ClusterIP 10.108.64.63 8080/TCP 19m +service/monitoring-prometheus-node-exporter ClusterIP 10.105.251.198 9100/TCP 19m +service/prometheus-operated ClusterIP None 9090/TCP 17m +``` + +![Stack pods + services](screenshots/lab16/01-stack-pods-svc.png) + +CRDs installed by the Prometheus Operator (used in the bonus task): + +```text +alertmanagerconfigs.monitoring.coreos.com +alertmanagers.monitoring.coreos.com +prometheusagents.monitoring.coreos.com +prometheuses.monitoring.coreos.com +prometheusrules.monitoring.coreos.com +servicemonitors.monitoring.coreos.com +``` + +--- + +## 3. 
Grafana Dashboard Exploration + +```bash +kubectl port-forward svc/monitoring-grafana -n monitoring 3000:80 +# open http://localhost:3000 — credentials: admin / +# +# Get the auto-generated admin password: +# kubectl get secret monitoring-grafana -n monitoring -o jsonpath='{.data.admin-password}' | base64 -d +``` + +Dashboards used (all pre-installed by the chart): + +- **Kubernetes / Compute Resources / Pod** +- **Kubernetes / Compute Resources / Namespace (Pods)** +- **Node Exporter / Nodes** +- **Kubernetes / Kubelet** +- **Kubernetes / Networking / Namespace (Pods)** + +### Q1 — Pod resources for the StatefulSet + +Dashboard: *Kubernetes / Compute Resources / Pod*. Pick `default` namespace, +each StatefulSet pod (`devops-info-python-0..2`) and read CPU + memory panels. + +Ground truth taken via PromQL at observation time (idle traffic, only +liveness/readiness probes hitting the pods): + +```text +$ rate(container_cpu_usage_seconds_total{pod=~"devops-info-python-[0-2]"}[5m]) + devops-info-python-0: ~0.00153 cores (1.5 millicores) + devops-info-python-1: ~0.00136 cores + devops-info-python-2: ~0.00140 cores + +$ container_memory_working_set_bytes{pod=~"devops-info-python-[0-2]"} / 1Mi + devops-info-python-0: ~23.8 MiB + devops-info-python-1: ~23.2 MiB + devops-info-python-2: ~24.2 MiB +``` + +CPU usage ≈ 1.5% of the request (`100m`), memory ≈ 18-19 % of the request +(`128Mi`). All 3 pods are very light — only health probes generate traffic. + +![Q1 — StatefulSet pods CPU / memory](screenshots/lab16/02-q1-statefulset-pod-resources.png) + +### Q2 — Most / least CPU consumers in `default` + +Dashboard: *Kubernetes / Compute Resources / Namespace (Pods)* with namespace +`default`. Sort the "CPU Usage" table. + +Six pods are running in `default`: 3 StatefulSet pods from this lab, and 3 +older Deployment pods left over from lab12. Ranking by CPU rate (5m): + +```text +sort_desc(sum by(pod) (rate(container_cpu_usage_seconds_total{namespace="default"}[5m]))) + + 1. 
devops-info-python-0 ~0.00153 cores ← MOST + 2. devops-info-python-2 ~0.00140 cores + 3. devops-info-python-1 ~0.00136 cores + 4. python-app-devops-info-python-67f5458c7c-qzjsf ~0.00114 cores + 5. python-app-devops-info-python-67f5458c7c-6qbxh ~0.00100 cores + 6. python-app-devops-info-python-67f5458c7c-l5wqv ~0.00095 cores ← LEAST +``` + +All six are within 0.6 millicores of each other — they run the same Flask app +with comparable probe traffic. + +![Q2 — namespace CPU ranking](screenshots/lab16/03-q2-namespace-cpu.png) + +### Q3 — Node memory & CPU + +Dashboard: *Node Exporter / Nodes*. Single-node minikube cluster. + +```text +Memory: + total: ~7936 MiB (8 GiB) + available: ~3382 MiB + used: ~57.4 % (~4554 MiB) + +CPU: + cores: 11 (host has 11 cores allocated to the minikube container) + busy: ~1.88 cores (sum(rate(node_cpu_seconds_total{mode!="idle"}[5m]))) + → ~17 % overall utilisation +``` + +![Q3 — node memory / CPU](screenshots/lab16/04-q3-node-metrics.png) + +### Q4 — Kubelet pod / container counts + +Dashboard: *Kubernetes / Kubelet*. Top-row stats give "Running Pods" and +"Running Containers". + +```text +Running statefulsets: 1 +Running pods: 47 +Running containers: 111 +Actual volume count: 169 +Desired volume count: 169 +``` + +The container count is much higher than the pod count because: + +- ArgoCD release brings in 7 components (controller, repo-server, server, dex, + redis, applicationset, notifications) each with sidecars. +- kube-prometheus-stack pods bundle multiple containers (e.g. `prometheus` + + `config-reloader`, `grafana` + `sidecar` + `init-chown-data`, alertmanager + + reloader). +- Several earlier-lab releases (lab10-15) still have sidecars (Vault Agent + injector, init containers, etc.). 
+ +![Q4 — kubelet running pods / containers](screenshots/lab16/05-q4-kubelet-counts.png) + +### Q5 — Network traffic in `default` + +> **Caveat — minikube + docker driver:** `container_network_*` metrics are +> **not** emitted by cAdvisor in this environment ([minikube#9418](https://github.com/kubernetes/minikube/issues/9418)) +> — the Docker-in-Docker network namespace setup hides per-pod counters from +> the kubelet. The `Kubernetes / Networking / Namespace (Pods)` dashboard +> therefore stays blank on this cluster (cAdvisor target is `up` but the +> series simply do not exist). On a production / kubeadm cluster these +> metrics are available out of the box. + +Workaround used here: read **node-level** network metrics from node-exporter +in *Node Exporter / Nodes → Network Traffic* panel — that data **is** flowing. + +Snapshot of node-level rates at observation time (5 m rate, top devices): + +```text +$ rate(node_network_transmit_bytes_total[5m]) — top by device + lo (loopback, intra-node): ~202 KB/s ← bulk of pod-to-pod traffic + bridge (docker bridge): ~139 KB/s + veth0bbc8392 ... vethXXXX: 50–200 B/s ← one veth per pod + eth0 (host uplink): ~14 KB/s +``` + +After running a synthetic burst: + +```bash +kubectl port-forward svc/devops-info-python -n default 8080:80 & +for i in $(seq 1 200); do + curl -s http://localhost:8080/ > /dev/null + curl -s http://localhost:8080/visits > /dev/null +done +``` + +`lo` and `bridge` rates spike for ~1 minute — capture the *Node Exporter / +Nodes → Network Traffic* panel during the burst. 
+ +![Q5 — node-level network traffic (per-pod unavailable on minikube/docker)](screenshots/lab16/06-q5-network.png) + +### Q6 — Active alerts (Alertmanager) + +```bash +kubectl port-forward svc/monitoring-kube-prometheus-alertmanager -n monitoring 9093:9093 +# open http://localhost:9093 +``` + +Five alerts firing under group `namespace="kube-system"` on a fresh +single-node minikube — every one is an expected artefact of the simplified +control plane: + +```text +ALERTS group: namespace="kube-system" — 5 alerts + + TargetDown job=kube-controller-manager severity=warning + TargetDown job=kube-etcd severity=warning + TargetDown job=kube-scheduler severity=warning + (minikube binds these components to non-default + ports / interfaces, so the chart's ServiceMonitors + can't scrape them — production clusters expose + them on standard endpoints) + etcdMembersDown job=kube-etcd severity=warning + etcdInsufficientMembers job=kube-etcd severity=critical + (single-node minikube has only 1 etcd member, + the rule expects ≥ quorum) +``` + +A built-in always-firing `Watchdog` alert is also present in the cluster +(`severity=none`, no namespace) — its purpose is to act as a heartbeat for +downstream receivers. It is not visible in the screenshot because the +`namespace="kube-system"` filter is active. All five visible alerts are true +positives that production clusters with HA etcd and exposed control-plane +endpoints would not trigger. + +![Q6 — Alertmanager active alerts](screenshots/lab16/07-q6-alertmanager.png) + +--- + +## 4. Init Containers + +Both patterns are integrated into the existing +[`templates/deployment.yaml`](devops-info-python/templates/deployment.yaml) +and gated by `.Values.initContainers.enabled` so non-init scenarios are not +affected. The dedicated values file +[`values-init.yaml`](devops-info-python/values-init.yaml) wires both patterns +together for a focused demo. 
+ +### 4.1 Pattern 1 — Download with `wget` + +`init-download` runs `wget` against `.Values.initContainers.download.url` and +writes the response to a shared `emptyDir` named `work-dir`. The main +container mounts the same volume read-only at +`.Values.initContainers.mainMountPath`. + +```yaml +initContainers: + - name: init-download + image: busybox:1.36 + command: + - sh + - -c + - | + set -eu + wget -O /work-dir/index.html https://example.com + ls -la /work-dir + volumeMounts: + - name: work-dir + mountPath: /work-dir +volumes: + - name: work-dir + emptyDir: {} +``` + +### 4.2 Pattern 2 — Wait-for-service + +`init-wait` polls DNS until the configured FQDN resolves (or a configurable +timeout fires). Used here to block the main container until the +kube-prometheus-stack `monitoring-grafana` Service is reachable. + +```yaml +initContainers: + - name: init-wait + image: busybox:1.36 + command: + - sh + - -c + - | + set -eu + SVC="monitoring-grafana.monitoring.svc.cluster.local" + END=$(( $(date +%s) + 120 )) + until nslookup "$SVC" >/dev/null 2>&1; do + [ "$(date +%s)" -ge "$END" ] && exit 1 + sleep 2 + done +``` + +### 4.3 Deployment & verification + +```bash +kubectl create namespace init-demo +helm install init-demo k8s/devops-info-python \ + -n init-demo \ + -f k8s/devops-info-python/values.yaml \ + -f k8s/devops-info-python/values-init.yaml + +# Watch the pod transition through Init:0/2 → Init:1/2 → Init:2/2 → Running +kubectl get po -n init-demo -w +``` + +Pod transitions through `Pending → Init:0/2 → Init:1/2 → PodInitializing → Running 1/1` +in ~15 seconds. 
+
+Init container 1 log — `init-download`:
+
+```text
+$ kubectl logs <pod> -n init-demo -c init-download
+[init-download] fetching https://example.com
+Connecting to example.com (8.6.112.0:443)
+wget: note: TLS certificate validation not implemented
+saving to '/work-dir/index.html'
+index.html 100% |********************************| 528 0:00:00 ETA
+'/work-dir/index.html' saved
+[init-download] saved:
+total 12
+drwxrwxrwx 2 root root 4096 May 10 11:00 .
+drwxr-xr-x 1 root root 4096 May 10 11:00 ..
+-rw-r--r-- 1 root root 528 May 10 11:00 index.html
+```
+
+Init container 2 log — `init-wait`:
+
+```text
+$ kubectl logs <pod> -n init-demo -c init-wait
+[init-wait] waiting for monitoring-grafana.monitoring.svc.cluster.local
+[init-wait] monitoring-grafana.monitoring.svc.cluster.local resolved, proceeding
+```
+
+Main container reads the artifact downloaded by the init container — proves the
+shared `emptyDir` volume works:
+
+```text
+$ kubectl exec <pod> -n init-demo -c devops-info-python -- ls -la /work-dir
+total 12
+drwxrwxrwx 2 root root 4096 May 10 11:00 .
+drwxr-xr-x 1 root root 4096 May 10 11:00 ..
+-rw-r--r-- 1 root root 528 May 10 11:00 index.html
+
+$ kubectl exec <pod> -n init-demo -c devops-info-python -- head -c 200 /work-dir/index.html
+Example Domain...
+```
+
+Both init containers finished cleanly:
+
+```text
+$ kubectl describe po -n init-demo | grep -E "Init Containers:|Containers:|Reason:"
+Init Containers:
+  Reason: Completed
+  Reason: Completed
+Containers:
+```
+
+![Init containers — pod lifecycle + logs](screenshots/lab16/08-init-containers.png)
+
+---
+
+## 5. Bonus — Custom Metrics & ServiceMonitor
+
+### 5.1 `/metrics` endpoint
+
+Already exposed by the Flask app
+([`app_python/app.py`](../app_python/app.py)) using
+`prometheus-client==0.23.1`. 
Metrics emitted (RED method): + +| Metric | Type | Labels | +|---|---|---| +| `http_requests_total` | Counter | `method, endpoint, status` | +| `http_request_duration_seconds` | Histogram | `method, endpoint` | +| `http_requests_in_progress` | Gauge | — | +| `devops_info_endpoint_calls_total` | Counter | `endpoint` | +| `devops_info_system_collection_seconds` | Histogram | — | + +Plus the default `process_*` and `python_gc_*` metrics from the client library. + +### 5.2 ServiceMonitor template + +[`templates/servicemonitor.yaml`](devops-info-python/templates/servicemonitor.yaml) +is gated by `.Values.serviceMonitor.enabled`. The `release: monitoring` label +matches the default `serviceMonitorSelector` of the kube-prometheus-stack +release, so Prometheus picks the target up automatically. + +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: devops-info-python + labels: + release: monitoring +spec: + selector: + matchLabels: + app.kubernetes.io/name: devops-info-python + endpoints: + - port: http + path: /metrics + interval: 15s + scrapeTimeout: 10s +``` + +The Service in [`templates/service.yaml`](devops-info-python/templates/service.yaml) +exposes a **named port `http`** (port 80 → targetPort 8080), which the +ServiceMonitor references. 
+ +### 5.3 Activation & verification + +```bash +helm upgrade --install devops-info-python k8s/devops-info-python \ + -n default \ + -f k8s/devops-info-python/values.yaml \ + -f k8s/devops-info-python/values-monitoring.yaml + +kubectl port-forward svc/monitoring-kube-prometheus-prometheus -n monitoring 9090:9090 +# open http://localhost:9090/targets — find serviceMonitor/default/devops-info-python +``` + +```text +$ curl -s http://localhost:9090/api/v1/targets | jq -r '.data.activeTargets[] + | select(.scrapePool|test("devops-info-python")) + | "\(.scrapePool) | health=\(.health) | url=\(.scrapeUrl) | pod=\(.labels.pod)"' +serviceMonitor/default/devops-info-python/0 | health=up | url=http://10.244.1.2:8080/metrics | pod=devops-info-python-0 +serviceMonitor/default/devops-info-python/0 | health=up | url=http://10.244.1.3:8080/metrics | pod=devops-info-python-1 +serviceMonitor/default/devops-info-python/0 | health=up | url=http://10.244.1.4:8080/metrics | pod=devops-info-python-2 +``` + +Aggregated query confirms application-level metrics are flowing in: + +```text +$ curl -sG --data-urlencode 'query=sum by(exported_endpoint, status) (http_requests_total)' \ + http://localhost:9090/api/v1/query | jq -r '...' +exported_endpoint=/health status=200 → 1048 # readiness/liveness probe traffic +exported_endpoint=/ status=200 → 24 # synthetic curl bursts +exported_endpoint=/visits status=200 → 24 +``` + +![Prometheus targets — ServiceMonitor scrape](screenshots/lab16/09-prom-targets.png) + +Sample query in the Prometheus UI (`http://localhost:9090/graph`): + +```promql +sum by (endpoint) (rate(http_requests_total[5m])) +``` + +![Prometheus graph — http_requests_total](screenshots/lab16/10-prom-graph.png) + +--- + +## 6. CLI Cheatsheet + +| Command | Purpose | +|---|---| +| `helm install monitoring prometheus-community/kube-prometheus-stack -n monitoring --create-namespace` | Install the full stack. | +| `kubectl get po,svc -n monitoring` | Verify all components. 
| `kubectl port-forward svc/monitoring-grafana -n monitoring 3000:80` | Access Grafana (admin / auto-generated password — see §8). |
+| `kubectl port-forward svc/monitoring-kube-prometheus-prometheus -n monitoring 9090:9090` | Access Prometheus UI. |
+| `kubectl port-forward svc/monitoring-kube-prometheus-alertmanager -n monitoring 9093:9093` | Access Alertmanager UI. |
+| `kubectl get servicemonitor -A` | List all ServiceMonitors picked up by the Operator. |
+| `kubectl logs <pod> -c init-download -n init-demo` | Inspect init container output. |
+| `kubectl exec <pod> -n init-demo -- cat /work-dir/index.html` | Confirm artifact reached the main container. |
+| `helm upgrade --install ... -f values-monitoring.yaml` | Toggle the ServiceMonitor on the app release. |
+| `helm uninstall monitoring -n monitoring && kubectl delete ns monitoring` | Cleanup. |
+
+---
+
+## 7. Troubleshooting
+
+| Symptom | Cause | Fix |
+|---|---|---|
+| Pod stuck in `Init:0/1` for `kube-prometheus-stack` StatefulSets | `init-config-reloader` image still pulling from quay.io (slow over proxy) | Wait — first install pulls ~5 images (~1 GB total). Don't re-run `helm install --wait`, the timeout doesn't kill the pull. |
+| Helm release status `failed` after a `--wait` timeout | `--wait` exited but resources kept reconciling | `helm upgrade monitoring prometheus-community/kube-prometheus-stack -n monitoring --reuse-values` once pods are ready to flip status to `deployed`. |
+| `ServiceMonitor` exists but Prometheus shows no target | Missing `release: monitoring` label or `port:` name does not match Service | Verify with `kubectl get servicemonitor -n default -o yaml` and `kubectl get svc devops-info-python -o yaml` (port must have `name: http`). 
| +| `Kubernetes / Networking / Namespace (Pods)` dashboard is empty | cAdvisor on minikube/docker driver does not emit `container_network_*` series ([minikube#9418](https://github.com/kubernetes/minikube/issues/9418)) | Use *Node Exporter / Nodes → Network Traffic* instead — it reads `node_network_*` from node-exporter, which works. On production clusters per-pod network metrics are available. | +| Init container `init-download` errors with `wget: bad address` | Cluster has no outbound DNS/connectivity | Use an in-cluster URL or attach a `dnsPolicy: ClusterFirst` resolver. | +| Init container `init-wait` times out | Target Service not yet created or different namespace | Double-check FQDN: `..svc.cluster.local`. | +| Main container reads stale file from `/work-dir` | `emptyDir` is per-pod, not persistent | Expected — re-create the pod to re-run `init-download`. | + +--- + +## 8. Course Credentials Reused + +- `monitoring-grafana` admin: username `admin`, password auto-generated by the + chart and stored in `Secret/monitoring-grafana`. Retrieve with: + ```bash + kubectl get secret monitoring-grafana -n monitoring -o jsonpath='{.data.admin-password}' | base64 -d + ``` + (Older docs claim the default is `prom-operator` — current chart versions + generate a random one unless `grafana.adminPassword` is set in values.) +- ArgoCD admin: see [COURSE_CREDENTIALS.local.md](COURSE_CREDENTIALS.local.md) (untouched in this lab). diff --git a/k8s/README.md b/k8s/README.md new file mode 100644 index 0000000000..a129e859cd --- /dev/null +++ b/k8s/README.md @@ -0,0 +1,365 @@ +# Lab 09 — Kubernetes Fundamentals + +## 1. 
Architecture Overview + +### Deployment Architecture + +``` + ┌──────────────────────────────────────────────────┐ + │ Minikube Cluster (Docker) │ + │ │ + │ ┌──────────────────────────────────────────┐ │ + │ │ Ingress (nginx, TLS termination) │ │ + │ │ local.example.com │ │ + │ │ /app1 → python-service │ │ + │ │ /app2 → go-service │ │ + │ └────────────┬──────────────┬──────────────┘ │ + │ │ │ │ + │ ┌────────────▼───────┐ ┌────▼──────────────┐ │ + │ │ Service (NodePort) │ │ Service (ClusterIP)│ │ + │ │ python :80→8080 │ │ go :80→8080 │ │ + │ └────────┬───────────┘ └────┬──────────────┘ │ + │ ┌────┼────┐ ┌────┼────┐ │ + │ ▼ ▼ ▼ ▼ ▼ ▼ │ + │ Pod Pod Pod Pod Pod Pod │ + │ 128Mi/100m each 64Mi/50m each │ + └──────────────────────────────────────────────────┘ +``` + +- **6 Pods total** — 3 Python replicas + 3 Go replicas +- **2 Services** — NodePort for Python (direct access), ClusterIP for Go +- **1 Ingress** — nginx with TLS, path-based routing (`/app1`, `/app2`) +- **Resource allocation** — Python: 128Mi/100m request, 256Mi/200m limit; Go: 64Mi/50m request, 128Mi/100m limit (smaller binary, lower overhead) + +## 2. 
Manifest Files + +| File | Description | +|------|-------------| +| `deployment.yml` | Python app — 3 replicas, health probes, resource limits, rolling update strategy | +| `service.yml` | Python service — NodePort type, port 80→8080, nodePort 30080 | +| `deployment-go.yml` | Go app — 3 replicas, health probes, lower resource limits (compiled binary) | +| `service-go.yml` | Go service — ClusterIP type, port 80→8080 | +| `ingress.yml` | Ingress — nginx controller, path-based routing, TLS with self-signed cert | + +### Key Configuration Choices + +- **3 replicas** — provides HA; enough for load balancing without overloading a single-node cluster +- **RollingUpdate with maxSurge=1, maxUnavailable=0** — guarantees zero downtime during updates +- **Resource requests/limits** — prevents resource starvation; Go app gets less since it has lower memory footprint (~10MB vs ~40MB for Python) +- **Liveness + Readiness probes on `/health`** — liveness restarts unhealthy containers, readiness removes them from Service endpoints until ready +- **imagePullPolicy: Never** — images are pre-loaded into minikube via `minikube image load` + +## 3. Deployment Evidence + +### Cluster Setup + +![Cluster Setup](docs/screenshots/cluster_setup.png) + +``` +$ kubectl cluster-info +Kubernetes control plane is running at https://127.0.0.1:49866 +CoreDNS is running at https://127.0.0.1:49866/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy + +$ kubectl get nodes -o wide +NAME STATUS ROLES AGE VERSION INTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +minikube Ready control-plane 20s v1.35.1 192.168.49.2 Debian GNU/Linux 12 (bookworm) 6.12.76-linuxkit docker://29.2.1 +``` + +**Tool choice: minikube** — full-featured local Kubernetes with built-in addon system (ingress, dashboard, metrics-server). Runs inside Docker on macOS via Docker Desktop driver. Preferred over kind for this lab because of native addon support. 
+ +### kubectl get all + +![kubectl get all](docs/screenshots/kubectl_get_all.png) + +``` +NAME READY STATUS RESTARTS AGE +pod/devops-info-go-659897f67d-lx8hq 1/1 Running 0 5m18s +pod/devops-info-go-659897f67d-v92rc 1/1 Running 0 5m18s +pod/devops-info-go-659897f67d-wms8r 1/1 Running 0 5m18s +pod/devops-info-python-68f8fb9d94-chpc5 1/1 Running 0 7m14s +pod/devops-info-python-68f8fb9d94-dts7z 1/1 Running 0 7m9s +pod/devops-info-python-68f8fb9d94-grmjk 1/1 Running 0 7m22s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/devops-info-go-service ClusterIP 10.99.120.232 80/TCP 5m18s +service/devops-info-python-service NodePort 10.96.158.179 80:30080/TCP 11m +service/kubernetes ClusterIP 10.96.0.1 443/TCP 11m + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/devops-info-go 3/3 3 3 5m18s +deployment.apps/devops-info-python 3/3 3 3 11m +``` + +### kubectl describe deployment + +![Describe Deployment](docs/screenshots/describe_deployment.png) + +``` +Name: devops-info-python +Replicas: 3 desired | 3 updated | 3 total | 3 available | 0 unavailable +StrategyType: RollingUpdate +RollingUpdateStrategy: 0 max unavailable, 1 max surge +Containers: + devops-info-python: + Image: aezuraa/devops-info-service:python + Limits: cpu: 200m, memory: 256Mi + Requests: cpu: 100m, memory: 128Mi + Liveness: http-get http://:8080/health delay=10s timeout=3s period=5s #success=1 #failure=3 + Readiness: http-get http://:8080/health delay=5s timeout=2s period=3s #success=1 #failure=3 +``` + +### App Response via Service + +![App Response](docs/screenshots/curl_app_response.png) + +``` +$ curl -s http://127.0.0.1:50035/ | python3 -m json.tool +{ + "service": { + "name": "devops-info-service", + "version": "1.0.0", + "description": "DevOps course info service", + "framework": "Flask" + }, + "system": { + "hostname": "devops-info-python-68f8fb9d94-2lpts", + "platform": "Linux", + "architecture": "aarch64", + "cpu_count": 11, + "python_version": "3.12.13" + } +} +``` + +## 4. 
Operations Performed + +### Deploy + +```bash +kubectl apply -f k8s/deployment.yml +kubectl apply -f k8s/service.yml +``` + +### Scaling to 5 Replicas + +![Pods Running (scaled)](docs/screenshots/pods_running.png) + +``` +$ kubectl scale deployment/devops-info-python --replicas=5 +deployment.apps/devops-info-python scaled + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +devops-info-python-68f8fb9d94-2lpts 1/1 Running 0 76s +devops-info-python-68f8fb9d94-bhbx7 1/1 Running 0 10s +devops-info-python-68f8fb9d94-g854x 1/1 Running 0 71s +devops-info-python-68f8fb9d94-jp7sd 1/1 Running 0 10s +devops-info-python-68f8fb9d94-jw4pl 1/1 Running 0 84s + +$ kubectl rollout status deployment/devops-info-python +deployment "devops-info-python" successfully rolled out +``` + +### Rolling Update + +Triggered via environment variable change (simulating config update): + +``` +$ kubectl set env deployment/devops-info-python APP_VERSION=1.1.0 +deployment.apps/devops-info-python env updated + +$ kubectl rollout status deployment/devops-info-python +Waiting for deployment "devops-info-python" rollout to finish: 1 out of 5 new replicas have been updated... +Waiting for deployment "devops-info-python" rollout to finish: 2 out of 5 new replicas have been updated... +Waiting for deployment "devops-info-python" rollout to finish: 3 out of 5 new replicas have been updated... +Waiting for deployment "devops-info-python" rollout to finish: 4 out of 5 new replicas have been updated... +deployment "devops-info-python" successfully rolled out +``` + +Zero downtime achieved — `maxUnavailable: 0` ensures at least 5 pods serve traffic throughout the update. + +### Rollback + +``` +$ kubectl rollout undo deployment/devops-info-python +deployment.apps/devops-info-python rolled back + +$ kubectl rollout history deployment/devops-info-python +REVISION CHANGE-CAUSE +1 +3 +4 +``` + +Rollback completes the same way — new ReplicaSet is scaled up, old one is scaled down gradually. 
+ +### Service Access + +```bash +# Via NodePort (minikube service tunnel) +minikube service devops-info-python-service --url +# → http://127.0.0.1:50035 + +# Via port-forward +kubectl port-forward service/devops-info-go-service 8081:80 +``` + +## 5. Production Considerations + +### Health Checks + +- **Liveness probe** (`/health`, period=5s) — restarts container if 3 consecutive checks fail; catches deadlocks, memory leaks +- **Readiness probe** (`/health`, period=3s) — removes pod from Service endpoints during startup or degradation; prevents traffic to unhealthy pods +- **initialDelaySeconds** — Python gets 10s (Flask startup), Go gets 5s (instant binary start) + +### Resource Limits Rationale + +| App | Requests | Limits | Reason | +|-----|----------|--------|--------| +| Python | 128Mi/100m | 256Mi/200m | Flask + Python runtime overhead ~40MB idle | +| Go | 64Mi/50m | 128Mi/100m | Compiled binary, ~10MB idle | + +Requests guarantee scheduling; limits prevent noisy-neighbor problems on shared nodes. + +### Production Improvements + +- **HPA (Horizontal Pod Autoscaler)** — auto-scale based on CPU/memory instead of static replica count +- **PodDisruptionBudget** — ensure minimum availability during voluntary disruptions (node drain, upgrades) +- **NetworkPolicy** — restrict inter-pod communication to only what's needed +- **Secrets management** — use external secrets operator or Vault instead of env vars +- **cert-manager** — auto-provision and renew TLS certificates via Let's Encrypt +- **Pod anti-affinity** — spread replicas across nodes for true HA +- **Resource quotas** — namespace-level limits to prevent any team from consuming all cluster resources + +### Monitoring & Observability + +- Python app already exposes `/metrics` (Prometheus format) — wire up with Prometheus + Grafana +- Structured JSON logs (Python) — ready for log aggregation via Loki/ELK +- Add `kubectl top pods` with metrics-server for real-time resource monitoring + +## 6. 
Challenges & Solutions + +### Challenge 1: ImagePullBackOff + +**Problem:** Pods failed with `ImagePullBackOff` — minikube couldn't pull `aezuraa/devops-info-service:python` from Docker Hub because the image was built for `linux/amd64` but the cluster runs `linux/arm64`. + +**Solution:** Built the image locally for the correct architecture, loaded it into minikube with `minikube image load`, and set `imagePullPolicy: Never`. + +**Debugging:** `kubectl describe pod ` → Events section showed the exact pull error. + +### Challenge 2: Ingress on macOS Docker Driver + +**Problem:** `minikube tunnel` couldn't bind to port 80/443 on 127.0.0.1 without sudo. + +**Solution:** Used `kubectl port-forward` to the ingress-nginx-controller service as an alternative: + +```bash +kubectl port-forward -n ingress-nginx service/ingress-nginx-controller 8443:443 8080:80 +``` + +### Challenge 3: HTTP to HTTPS Redirect + +**Problem:** HTTP requests to Ingress returned 308 redirect instead of content. + +**Solution:** This is expected behavior — nginx Ingress automatically redirects HTTP→HTTPS when TLS is configured. Tested directly via HTTPS port. + +### Key Learnings + +- Kubernetes is truly declarative — define desired state, controllers reconcile +- Labels/selectors are the glue between Deployments, Services, and Ingress +- Health probes are essential — without them, K8s can't distinguish healthy from unhealthy pods +- Rolling updates with `maxUnavailable: 0` guarantee zero downtime +- Local development with minikube requires image pre-loading when not using a registry + +--- + +## Bonus: Ingress with TLS + +### Multi-App Deployment + +Both Python and Go apps deployed as separate Deployments with their own Services. 
+ +### Ingress Controller + +``` +$ minikube addons enable ingress +* The 'ingress' addon is enabled + +$ kubectl get pods -n ingress-nginx +NAME READY STATUS +ingress-nginx-controller-596f8778bc-w2s9z 1/1 Running +``` + +### TLS Certificate + +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 \ + -keyout tls.key -out tls.crt \ + -subj "/CN=local.example.com/O=local.example.com" + +kubectl create secret tls tls-secret --key tls.key --cert tls.crt +``` + +### Path-Based Routing + +``` +$ kubectl get ingress +NAME CLASS HOSTS ADDRESS PORTS AGE +apps-ingress nginx local.example.com 192.168.49.2 80, 443 113s +``` + +### Ingress HTTPS — exact commands (404 without `Host`) + +Ingress rules use **host** `local.example.com`. If you run `curl https://127.0.0.1:8443/app1` **without** `Host: local.example.com`, nginx does not match this Ingress and returns **404**. + +**Terminal 1** (leave running): + +```bash +kubectl port-forward -n ingress-nginx service/ingress-nginx-controller 8443:443 +``` + +**Terminal 2** — screenshots / checks: + +```bash +# Python app (must include Host header) +curl -sk -H "Host: local.example.com" https://127.0.0.1:8443/app1 + +# Go app +curl -sk -H "Host: local.example.com" https://127.0.0.1:8443/app2 + +# Ingress + secrets (screenshot) +kubectl get ingress +kubectl get secrets +``` + +Optional: add `local.example.com` to `/etc/hosts` pointing at `minikube ip`, then you can use `curl -sk https://local.example.com:8443/app1` **only if** you still reach the controller on that port (same port-forward applies). + +### Routing Verification + +![HTTPS /app1 — Python](docs/screenshots/ingress_https_app1.png) + +![HTTPS /app2 — Go](docs/screenshots/ingress_https_app2.png) + +![Ingress & Secrets Status](docs/screenshots/ingress_status.png) + +``` +$ curl -sk -H "Host: local.example.com" https://127.0.0.1:8443/app1 | python3 -m json.tool +{ + "service": { "framework": "Flask", "name": "devops-info-service" }, + ... 
+} + +$ curl -sk -H "Host: local.example.com" https://127.0.0.1:8443/app2 | python3 -m json.tool +{ + "service": { "framework": "Go net/http", "name": "devops-info-service" }, + ... +} +``` + +### Ingress Benefits over NodePort + +- **L7 routing** — route by path/host instead of port numbers (no need to remember 30080, 30081...) +- **TLS termination** — one certificate at the edge, backends stay HTTP +- **Centralized config** — single entry point for all services +- **Name-based virtual hosting** — multiple domains on one IP +- **Rewrite rules** — transform URLs before forwarding to backends diff --git a/k8s/ROLLOUTS.md b/k8s/ROLLOUTS.md new file mode 100644 index 0000000000..3d85cc063d --- /dev/null +++ b/k8s/ROLLOUTS.md @@ -0,0 +1,434 @@ +# Argo Rollouts — Progressive Delivery (Lab 14) + +Canary and Blue-Green deployments of `devops-info-python` Helm chart powered by the Argo Rollouts controller. + +## 1. Setup + +### 1.1 Installation + +Controller + CRDs + Dashboard + CLI plugin installed on minikube. 
+ +```bash +# Controller +kubectl create namespace argo-rollouts +kubectl apply -n argo-rollouts -f https://github.com/argoproj/argo-rollouts/releases/latest/download/install.yaml + +# Dashboard +kubectl apply -n argo-rollouts -f https://github.com/argoproj/argo-rollouts/releases/latest/download/dashboard-install.yaml + +# CLI plugin (macOS / Homebrew) +brew install argoproj/tap/kubectl-argo-rollouts +``` + +Verification: + +```text +$ kubectl argo rollouts version +kubectl-argo-rollouts: v1.8.3+49fa151 + +$ kubectl -n argo-rollouts get deployments +NAME READY UP-TO-DATE AVAILABLE AGE +argo-rollouts 1/1 1 1 23m +argo-rollouts-dashboard 1/1 1 1 23m + +$ kubectl get crd | grep argoproj.io +analysisruns.argoproj.io +analysistemplates.argoproj.io +clusteranalysistemplates.argoproj.io +experiments.argoproj.io +rollouts.argoproj.io +``` + +### 1.2 Dashboard Access + +```bash +kubectl port-forward svc/argo-rollouts-dashboard -n argo-rollouts 3100:3100 +# http://localhost:3100 +``` + +![Dashboard landing](screenshots/lab14/01-dashboard-overview.png) + +### 1.3 Rollout vs Deployment + +| Field | Deployment | Rollout | +|---|---|---| +| `apiVersion` | `apps/v1` | `argoproj.io/v1alpha1` | +| `spec.strategy` | `RollingUpdate` \| `Recreate` | `canary` \| `blueGreen` (rich progressive config) | +| Traffic shifting | No | Yes (replica-based or traffic-manager based) | +| Manual gates | No | `pause: {}` between steps | +| Metric-based gating | No | `analysis` steps with `AnalysisTemplate` | +| Auto-rollback | No | Yes, on failed analysis or on `abort` | +| Preview env (B/G) | No | `previewService` separate from `activeService` | +| Pod template / selectors | Same | Same | + +Everything else (`replicas`, `selector`, pod `template`, probes, volumes, env) is identical — the Deployment can be converted by swapping `kind` and adding `strategy:`. + +--- + +## 2. 
Canary Deployment + +### 2.1 Strategy + +File: [k8s/devops-info-python/templates/rollout.yaml](devops-info-python/templates/rollout.yaml) +Values: [k8s/devops-info-python/values-canary.yaml](devops-info-python/values-canary.yaml) + +```yaml +strategy: + canary: + steps: + - setWeight: 20 + - pause: {} # step 1 — manual promote + - setWeight: 40 + - pause: { duration: 30s } # steps 3,5,7 — auto after 30s + - setWeight: 60 + - pause: { duration: 30s } + - setWeight: 80 + - pause: { duration: 30s } + - setWeight: 100 +``` + +With `replicas: 5`, traffic weight translates to pod count: 20 % ≈ 1 pod, 40 % ≈ 2, 60 % ≈ 3, 80 % ≈ 4, 100 % = 5. + +### 2.2 Install + +```bash +kubectl create ns rollouts +helm install devops-info-python k8s/devops-info-python \ + -n rollouts -f k8s/devops-info-python/values.yaml \ + -f k8s/devops-info-python/values-canary.yaml +``` + +Initial state — 5 stable pods serving 100 %: + +```text +Status: ✔ Healthy +Strategy: Canary + Step: 9/9 + SetWeight: 100 +Images: aezuraa/devops-info-service:python (stable) +Replicas: Desired 5 / Ready 5 +``` + +### 2.3 Trigger and Observe + +```bash +kubectl argo rollouts set image devops-info-python -n rollouts \ + devops-info-python=aezuraa/devops-info-service:go +``` + +Paused at step 1 (20 %) — **manual promotion required**: + +```text +Status: ॥ Paused +Message: CanaryPauseStep +Strategy: Canary + Step: 1/9 + SetWeight: 20 ActualWeight: 20 +Images: aezuraa/devops-info-service:go (canary) + aezuraa/devops-info-service:python (stable) +├──# revision:2 devops-info-python-6d4457956f ReplicaSet ✔ Healthy (1 pod, canary) +└──# revision:1 devops-info-python-678b568b65 ReplicaSet ✔ Healthy (4 pods, stable) +``` + +![Canary paused at 20%](screenshots/lab14/02-canary-paused-20.png) + +### 2.4 Manual Promotion + +```bash +kubectl argo rollouts promote devops-info-python -n rollouts +``` + +Advanced to step 3 (40 %) → auto-progresses every 30 s through 60 % → 80 % → 100 %: + +```text +Status: ✔ Healthy +Step: 9/9 
SetWeight: 100 +Images: aezuraa/devops-info-service:go (stable) +revision:2 — 5 pods stable revision:1 — ScaledDown +``` + +![Canary fully promoted](screenshots/lab14/03-canary-promoted.png) + +### 2.5 Rollback (Abort) + +Abort while a rollout is in progress — traffic returns to the previous stable revision instantly (no gradual shift, just scale down the canary RS). + +```bash +# Start another rollout +kubectl argo rollouts set image devops-info-python -n rollouts \ + devops-info-python=aezuraa/devops-info-service:python +# Abort before finishing +kubectl argo rollouts abort devops-info-python -n rollouts +``` + +```text +Status: ✖ Degraded +Message: RolloutAborted: Rollout aborted update to revision 3 +Step: 0/9 SetWeight: 0 +Images: aezuraa/devops-info-service:go (stable) +revision:3 canary — ScaledDown / Terminating +revision:2 stable — 5 pods Running (100 % traffic) +``` + +![Canary aborted](screenshots/lab14/04-canary-aborted.png) + +To resume: `kubectl argo rollouts retry rollout devops-info-python -n rollouts`. + +--- + +## 3. Blue-Green Deployment + +### 3.1 Strategy + +Values: [k8s/devops-info-python/values-bluegreen.yaml](devops-info-python/values-bluegreen.yaml) + +```yaml +strategy: + blueGreen: + activeService: devops-info-python # prod traffic + previewService: devops-info-python-preview # test the green version + autoPromotionEnabled: false # manual promote + scaleDownDelaySeconds: 30 # keep old RS 30 s for rollback +``` + +Two Services are deployed simultaneously — the active one is always updated to the selector of whichever revision is "active"; the preview one to the "green" revision. 
+ +### 3.2 Install and Trigger + +```bash +helm install devops-info-python k8s/devops-info-python \ + -n rollouts -f k8s/devops-info-python/values.yaml \ + -f k8s/devops-info-python/values-bluegreen.yaml + +kubectl argo rollouts set image devops-info-python -n rollouts \ + devops-info-python=aezuraa/devops-info-service:go +``` + +Rollout paused — both versions running side-by-side: + +```text +Status: ॥ Paused Message: BlueGreenPause +Strategy: BlueGreen +Images: aezuraa/devops-info-service:go (preview) + aezuraa/devops-info-service:python (stable, active) +Replicas: Desired 3 / Current 6 (3 blue + 3 green) +``` + +![Blue-Green preview ready](screenshots/lab14/05-bluegreen-preview.png) + +### 3.3 Verify Preview vs Active + +```bash +kubectl port-forward svc/devops-info-python -n rollouts 8081:80 & +kubectl port-forward svc/devops-info-python-preview -n rollouts 8082:80 & +``` + +```text +# Active (8081) — Python / Flask +{"service":{"name":"devops-info-service", ... }, "runtime":{"framework":"Flask"} ...} + +# Preview (8082) — Go / net/http +{"service":{"name":"devops-info-service","framework":"Go net/http"}, ...} +``` + +### 3.4 Promote + +```bash +kubectl argo rollouts promote devops-info-python -n rollouts +``` + +The `activeService` selector is switched from the blue RS's pod-template-hash to the green one — **instantly**, no traffic shift, no multi-version mixing. Old pods stay for `scaleDownDelaySeconds` in case of rollback. + +```text +Status: ✔ Healthy +Images: aezuraa/devops-info-service:go (stable, active) + aezuraa/devops-info-service:python +revision:2 stable,active (3 pods) revision:1 delay:5s (still present) +``` + +![Blue-Green promoted](screenshots/lab14/06-bluegreen-promoted.png) + +### 3.5 Instant Rollback + +```bash +kubectl argo rollouts undo devops-info-python -n rollouts +``` + +Active selector is switched back to the original RS within a second. 
Because the old pods are still up (within `scaleDownDelaySeconds`), no pod re-creation is needed — latency is near-zero: + +```text +Status: ✔ Healthy +Images: aezuraa/devops-info-service:go + aezuraa/devops-info-service:python (stable, active) +``` + +![Blue-Green rolled back](screenshots/lab14/07-bluegreen-rollback.png) + +--- + +## 4. Strategy Comparison + +| | **Canary** | **Blue-Green** | +|---|---|---| +| Traffic shift | Gradual (replica % or traffic manager) | Instant switch | +| Resource cost | Shared pool (small overhead) | 2× during overlap | +| Rollback speed | Scale down canary RS (seconds) | Service selector flip (≤ 1 s) | +| User impact on fail | Some % got bad version | 100 % got bad version (until flip back) | +| Debuggability | Watch metrics across % | Test preview before promotion | +| Best for | Stateless APIs, UI apps where partial blast radius is fine | Risky releases, DB-schema-sensitive apps, easy pre-prod validation | +| When NOT to use | Breaking API changes between old and new (clients see both) | Tight resource budget; long-lived connections you cannot drain | + +### Recommendation + +- **Canary** for the default path — `devops-info-python`-style services with idempotent HTTP endpoints and metric-based validation. +- **Blue-Green** when a new build touches shared state (schema migrations, cache format) and mixing versions is dangerous — get preview correctness first, promote once. + +--- + +## 5. 
Bonus — Automated Analysis + +### 5.1 AnalysisTemplate + +File: [k8s/devops-info-python/templates/analysistemplate.yaml](devops-info-python/templates/analysistemplate.yaml) +Values: [k8s/devops-info-python/values-canary-analysis.yaml](devops-info-python/values-canary-analysis.yaml) + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisTemplate +metadata: + name: devops-info-python-healthcheck +spec: + args: + - name: service-name + metrics: + - name: health-probe + interval: 10s + count: 3 + failureLimit: 1 + successCondition: 'result == "healthy"' + provider: + web: + url: "http://{{args.service-name}}.rollouts.svc.cluster.local:80/health" + timeoutSeconds: 5 + jsonPath: "{$.status}" +``` + +The probe hits `/health` every 10 s, 3 times in a row. Success = JSON `status` field equals `"healthy"`. One failure fails the analysis → rollout auto-aborted → traffic reverts to stable. + +### 5.2 Canary with Analysis Step + +```yaml +strategy: + canary: + steps: + - setWeight: 25 + - pause: { duration: 10s } + - analysis: + templates: + - templateName: devops-info-python-healthcheck + args: + - name: service-name + value: devops-info-python + - setWeight: 50 + - pause: { duration: 10s } + - setWeight: 75 + - pause: { duration: 10s } + - setWeight: 100 +``` + +### 5.3 Success Path + +```bash +helm install devops-info-python k8s/devops-info-python \ + -n rollouts -f k8s/devops-info-python/values.yaml \ + -f k8s/devops-info-python/values-canary-analysis.yaml + +kubectl argo rollouts set image devops-info-python -n rollouts \ + devops-info-python=aezuraa/devops-info-service:go +``` + +```text +AnalysisRun devops-info-python-6d4457956f-2-2 + Metrics: health-probe Phase: Successful + Measurement 1 Phase: Successful Value: "healthy" + Measurement 2 Phase: Successful Value: "healthy" + Measurement 3 Phase: Successful Value: "healthy" +→ Rollout advances through setWeight 50 → 75 → 100 +→ Final Status: ✔ Healthy Images: go (stable) +``` + +![Analysis 
success](screenshots/lab14/08-analysis-success.png) + +### 5.4 Auto-Rollback on Failure + +Inject a failure by pointing the probe at a non-existent service: + +```bash +kubectl -n rollouts patch rollout devops-info-python --type=merge -p ' +spec: + strategy: + canary: + steps: + - setWeight: 25 + - pause: { duration: 5s } + - analysis: + templates: [{ templateName: devops-info-python-healthcheck }] + args: + - { name: service-name, value: does-not-exist-on-purpose } + - setWeight: 50 + - pause: { duration: 10s } + - setWeight: 75 + - pause: { duration: 10s } + - setWeight: 100 +' +kubectl argo rollouts set image devops-info-python -n rollouts \ + devops-info-python=aezuraa/devops-info-service:python +``` + +Outcome: + +```text +AnalysisRun Phase: Error + Consecutive Error: 5 (> consecutiveErrorLimit 4) + Message: dial tcp: lookup does-not-exist-on-purpose.rollouts.svc.cluster.local on 10.96.0.10:53: no such host + +Rollout Status: ✖ Degraded +Message: RolloutAborted: Rollout aborted update to revision 3: + Step-based analysis phase error/failed: Metric "health-probe" assessed Error + +revision:3 canary — ScaledDown / Terminating +revision:2 stable — 4 pods Running (go image) +``` + +→ The rollout **auto-aborted** and traffic stayed on the previous stable version — no human action required. + +![Analysis auto-rollback](screenshots/lab14/09-analysis-auto-rollback.png) + +--- + +## 6. 
CLI Commands Reference + +| Action | Command | +|---|---| +| Install controller | `kubectl apply -n argo-rollouts -f https://github.com/argoproj/argo-rollouts/releases/latest/download/install.yaml` | +| Install CLI | `brew install argoproj/tap/kubectl-argo-rollouts` | +| Dashboard | `kubectl port-forward svc/argo-rollouts-dashboard -n argo-rollouts 3100:3100` | +| Get rollout | `kubectl argo rollouts get rollout <rollout> -n <ns>` | +| Watch rollout | `kubectl argo rollouts get rollout <rollout> -n <ns> -w` | +| Update image | `kubectl argo rollouts set image <rollout> -n <ns> <container>=<image>:<tag>` | +| Manual promote | `kubectl argo rollouts promote <rollout> -n <ns>` | +| Promote all skipping pauses | `kubectl argo rollouts promote <rollout> -n <ns> --full` | +| Abort | `kubectl argo rollouts abort <rollout> -n <ns>` | +| Retry aborted | `kubectl argo rollouts retry rollout <rollout> -n <ns>` | +| Undo to previous revision | `kubectl argo rollouts undo <rollout> -n <ns>` | +| Undo to specific revision | `kubectl argo rollouts undo <rollout> -n <ns> --to-revision=<revision>` | +| Pause indefinitely | `kubectl argo rollouts pause <rollout> -n <ns>` | +| List AnalysisRuns | `kubectl -n <ns> get analysisrun` | +| Debug analysis | `kubectl -n <ns> describe analysisrun <name>` | + +### Troubleshooting + +- **`ErrImagePull` on minikube** — the image isn't on the node: `minikube image load <image>:<tag>`. +- **Rollout stuck `Progressing`** — `kubectl argo rollouts get rollout ... -w` and check `Message`; usually pod probes failing or analysis not satisfied. +- **AnalysisRun `Error`** vs **`Failed`** — `Error` means the provider couldn't be evaluated (DNS, HTTP 5xx); `Failed` means `successCondition` evaluated to false. Both trigger rollback once the limit is exceeded. +- **Preview pods never appear (Blue-Green)** — ensure `previewService` exists before triggering the rollout; selectors are managed by the controller, not Helm. diff --git a/k8s/SECRETS.md b/k8s/SECRETS.md new file mode 100644 index 0000000000..246584b0dc --- /dev/null +++ b/k8s/SECRETS.md @@ -0,0 +1,437 @@ +# Lab 11 — Kubernetes Secrets & HashiCorp Vault + +## Table of Contents +1. [Kubernetes Secrets](#1-kubernetes-secrets) +2. 
[Helm Secret Integration](#2-helm-secret-integration) +3. [Resource Management](#3-resource-management) +4. [Vault Integration](#4-vault-integration) +5. [Security Analysis](#5-security-analysis) +6. [Bonus — Vault Agent Templates](#6-bonus--vault-agent-templates) + +--- + +## 1. Kubernetes Secrets + +### Creating a Secret + +```bash +kubectl create secret generic app-credentials \ + --from-literal=username=admin \ + --from-literal=password=S3cur3P@ssw0rd +``` + +Output: +``` +secret/app-credentials created +``` + +### Viewing the Secret (YAML) + +```bash +kubectl get secret app-credentials -o yaml +``` + +```yaml +apiVersion: v1 +data: + password: UzNjdXIzUEBzc3cwcmQ= + username: YWRtaW4= +kind: Secret +metadata: + name: app-credentials + namespace: default +type: Opaque +``` + +### Decoding Base64 Values + +```bash +echo "YWRtaW4=" | base64 -d # → admin +echo "UzNjdXIzUEBzc3cwcmQ=" | base64 -d # → S3cur3P@ssw0rd +``` + +### Base64 Encoding vs Encryption + +| Aspect | Base64 Encoding | Encryption | +|--------|----------------|------------| +| Purpose | Transport/storage format | Data protection | +| Security | **None** — trivially reversible | Strong — requires key to decrypt | +| K8s default | Yes — secrets are base64 only | No — must enable etcd encryption | +| Reversible | By anyone with `base64 -d` | Only with the encryption key | + +**Key takeaway:** Kubernetes Secrets are base64-encoded, **NOT encrypted** by default. Anyone with API access can decode them. + +### etcd Encryption at Rest +- By default, secrets are stored in etcd as base64 plaintext +- Enable `EncryptionConfiguration` to encrypt secrets at rest in etcd +- Recommended for production: use `aescbc`, `aesgcm`, or KMS provider +- Combine with RBAC to restrict `get`/`list` access to secrets + +--- + +## 2. 
Helm Secret Integration + +### Chart Structure + +``` +devops-info-python/ +├── Chart.yaml +├── values.yaml +└── templates/ + ├── _helpers.tpl + ├── deployment.yaml + ├── secrets.yaml ← NEW + ├── serviceaccount.yaml ← NEW + └── service.yaml +``` + +### Secret Template (`templates/secrets.yaml`) + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.fullname" . }}-secret + labels: + {{- include "common.labels" . | nindent 4 }} +type: Opaque +stringData: + {{- range $key, $value := .Values.secrets }} + {{ $key }}: {{ $value | quote }} + {{- end }} +``` + +### Secret Values in `values.yaml` + +```yaml +secrets: + DB_USERNAME: "placeholder" + DB_PASSWORD: "placeholder" +``` + +Real values injected at deploy time (never committed): +```bash +helm upgrade --install devops-info-python ./devops-info-python \ + --set secrets.DB_USERNAME= \ + --set secrets.DB_PASSWORD= +``` + +### Consuming Secrets in Deployment + +Secrets are injected via `envFrom` with `secretRef`: + +```yaml +envFrom: + - secretRef: + name: {{ include "common.fullname" . }}-secret +``` + +### Verification + +**Environment variables inside pod:** +```bash +kubectl exec -- env | grep DB_ +``` +``` +DB_PASSWORD= +DB_USERNAME= +``` + +**`kubectl describe pod` output — values are NOT visible:** +``` +Environment Variables from: + devops-info-python-secret Secret Optional: false +Environment: + HOST: 0.0.0.0 + PORT: 8080 + DEBUG: False +``` + +![secret_describe_pod](docs/screenshots/secret_describe_pod.png) + +--- + +## 3. 
Resource Management + +### Configuration in `values.yaml` + +**Python chart:** +```yaml +resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" +``` + +**Go chart:** +```yaml +resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" +``` + +### Requests vs Limits + +| Aspect | Requests | Limits | +|--------|----------|--------| +| Purpose | Minimum guaranteed resources | Maximum allowed resources | +| Scheduling | Used by scheduler for pod placement | Not used for scheduling | +| Enforcement | Soft — pod always gets at least this | Hard — pod killed/throttled if exceeded | +| OOM Kill | Not triggered by requests | Triggered if memory limit exceeded | +| CPU | Guaranteed CPU share | CPU throttled at limit | + +### Choosing Appropriate Values +1. **Start with monitoring** — observe actual resource usage under load +2. **Requests** — set to average/typical consumption (~P50) +3. **Limits** — set to peak consumption (~P99) + buffer (1.5-2x requests) +4. **QoS Classes** — `Guaranteed` (requests=limits) for critical workloads, `Burstable` for general use +5. Go services use fewer resources than Python due to compiled binary + lower memory footprint + +--- + +## 4. 
Vault Integration + +### Installation + +```bash +helm repo add hashicorp https://helm.releases.hashicorp.com +helm repo update +helm install vault hashicorp/vault \ + --set "server.dev.enabled=true" \ + --set "injector.enabled=true" +``` + +### Vault Pods Running + +```bash +kubectl get pods -l app.kubernetes.io/name=vault +``` +``` +NAME READY STATUS RESTARTS AGE +vault-0 1/1 Running 0 6m +vault-agent-injector-848dd747d7-htcrd 1/1 Running 0 6m +``` + +![vault_pods_running](docs/screenshots/vault_pods_running.png) + +### KV Secrets Engine Configuration + +```bash +kubectl exec vault-0 -- vault kv put secret/devops-info/config \ + username="" \ + password="" \ + api_key="" +``` + +Verification: +```bash +kubectl exec vault-0 -- vault kv get secret/devops-info/config +``` +``` +====== Data ====== +Key Value +--- ----- +api_key +password +username +``` + +### Kubernetes Auth Method + +```bash +# Enable K8s auth +kubectl exec vault-0 -- vault auth enable kubernetes + +# Configure with cluster credentials +kubectl exec vault-0 -- /bin/sh -c ' +vault write auth/kubernetes/config \ + kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443" \ + token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ + kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ + issuer="https://kubernetes.default.svc.cluster.local"' +``` + +### Policy (sanitized) + +```hcl +path "secret/data/devops-info/*" { + capabilities = ["read"] +} +``` + +### Role + +```bash +vault write auth/kubernetes/role/devops-info \ + bound_service_account_names=devops-info-python,devops-info-go \ + bound_service_account_namespaces=default \ + policies=devops-info \ + ttl=24h +``` + +### Proof of Secret Injection + +All app pods run 2/2 containers (app + vault-agent sidecar): +``` +NAME READY STATUS RESTARTS AGE +devops-info-python-c7c769d5-dbgjd 2/2 Running 0 2m +devops-info-go-5ddf474d69-4h4z7 2/2 Running 0 1m +``` + +Secrets injected at `/vault/secrets/config`: 
+```bash +kubectl exec -c devops-info-python -- cat /vault/secrets/config +``` +``` +DB_USERNAME= +DB_PASSWORD= +API_KEY= +``` + +![vault_secret_injected](docs/screenshots/vault_secret_injected.png) + +### Sidecar Injection Pattern + +1. **Mutating Webhook** — Vault Agent Injector watches for pods with `vault.hashicorp.com/agent-inject: "true"` annotation +2. **Init Container** — `vault-agent-init` runs first, authenticates with Vault via K8s auth, fetches secrets, writes them to a shared volume +3. **Sidecar Container** — `vault-agent` runs alongside the app, keeps secrets refreshed and handles token renewal +4. **Shared Volume** — `/vault/secrets/` is an in-memory `tmpfs` volume mounted in both the init/sidecar and app containers + +``` +Pod Startup Flow: + ┌─────────────────┐ ┌──────────┐ ┌─────────────┐ + │ vault-agent-init │────▶│ Vault │────▶│ Write to │ + │ (init container) │ │ Server │ │ /vault/ │ + └─────────────────┘ └──────────┘ │ secrets/ │ + └──────┬──────┘ + │ shared volume + ┌─────────────────┐ ┌──────┴──────┐ + │ vault-agent │◀─── keeps refreshing │ App reads │ + │ (sidecar) │ │ secrets │ + └─────────────────┘ └─────────────┘ +``` + +--- + +## 5. Security Analysis + +### K8s Secrets vs Vault + +| Feature | K8s Secrets | HashiCorp Vault | +|---------|------------|-----------------| +| Encryption at rest | Only if etcd encryption enabled | Built-in (AES-GCM) | +| Access control | RBAC only | Fine-grained policies + RBAC | +| Audit logging | K8s audit logs (if enabled) | Built-in detailed audit log | +| Secret rotation | Manual — redeploy needed | Automatic via agent refresh | +| Dynamic secrets | No | Yes (DB creds, cloud IAM, etc.) 
| +| Versioning | No | KV v2 supports versioning | +| Lease/TTL | No | Yes — auto-expiration | +| Complexity | Low | Medium-High | +| External access | No — cluster only | Yes — API-driven | + +### When to Use Each + +**Use K8s Secrets when:** +- Simple, static configs (API URLs, feature flags) +- Development/staging environments +- Small teams with low compliance requirements +- Quick prototyping + +**Use Vault when:** +- Sensitive production credentials (DB passwords, API keys) +- Compliance requirements (PCI, HIPAA, SOC2) +- Dynamic secrets needed (short-lived DB credentials) +- Secret rotation without redeployment +- Multi-cluster or multi-cloud environments +- Audit trail required + +### Production Recommendations + +1. **Never** commit real secrets to Git — use `--set` or external managers +2. Enable **etcd encryption at rest** if using K8s Secrets +3. Use **RBAC** to restrict secret access to specific service accounts +4. Deploy Vault in **HA mode** (not dev mode) with Raft/Consul backend +5. Enable **Vault audit logging** for compliance +6. Use **dynamic secrets** where possible (database, cloud providers) +7. Implement **secret rotation** policies +8. Use **namespaces** in Vault for multi-tenant environments + +--- + +## 6. Bonus — Vault Agent Templates + +### Template Annotation + +Vault Agent supports custom rendering of secrets using Go templates. 
Instead of raw JSON output, secrets are rendered in `.env` format: + +```yaml +vault.hashicorp.com/agent-inject-template-config: | + {{- with secret "secret/data/devops-info/config" -}} + DB_USERNAME={{ .Data.data.username }} + DB_PASSWORD={{ .Data.data.password }} + API_KEY={{ .Data.data.api_key }} + {{- end -}} +``` + +This is configured in `values.yaml` and rendered by the deployment template: + +```yaml +vault: + enabled: true + role: "devops-info" + secretPath: "secret/data/devops-info/config" + template: | + {{- with secret "secret/data/devops-info/config" -}} + DB_USERNAME={{ .Data.data.username }} + DB_PASSWORD={{ .Data.data.password }} + API_KEY={{ .Data.data.api_key }} + {{- end -}} +``` + +### Rendered File Content + +The file at `/vault/secrets/config` inside the pod: +``` +DB_USERNAME= +DB_PASSWORD= +API_KEY= +``` + +### Dynamic Secret Rotation + +- **Vault Agent sidecar** continuously runs and monitors secret leases +- When secrets are updated in Vault, the agent detects changes and re-renders templates +- Default refresh interval: `5m` (configurable via `vault.hashicorp.com/agent-cache-enable`) +- `vault.hashicorp.com/agent-inject-command` annotation can trigger a script when secrets change (e.g., reload app config, send SIGHUP) + +### Named Template in `_helpers.tpl` + +Added `common.envVars` to `common-lib/templates/_helpers.tpl`: + +```yaml +{{- define "common.envVars" -}} +{{- range .Values.env }} +- name: {{ .name }} + value: {{ .value | quote }} +{{- end }} +{{- end }} +``` + +Used in `deployment.yaml` via `include`: +```yaml +env: + {{- include "common.envVars" . 
| nindent 12 }} +``` + +**Benefits:** +- **DRY principle** — env var rendering logic defined once, reused across charts +- **Consistency** — all charts render env vars the same way +- **Maintainability** — change format in one place, applies everywhere +- **Library pattern** — `common-lib` provides shared templates consumed by app charts diff --git a/k8s/STATEFULSET.md b/k8s/STATEFULSET.md new file mode 100644 index 0000000000..4bfb831285 --- /dev/null +++ b/k8s/STATEFULSET.md @@ -0,0 +1,286 @@ +# StatefulSets & Persistent Storage (Lab 15) + +`devops-info-python` deployed as a StatefulSet with stable pod identities, headless DNS, and per-pod persistent storage. + +## 1. Why StatefulSet + +The Flask app persists its visit counter to `/data/visits` (`app_python/app.py`, line 87: `VISITS_FILE = os.getenv('VISITS_FILE', '/data/visits')`). With a Deployment + a single shared `ReadWriteOnce` PVC the replicas race and only one ever runs. A StatefulSet gives every replica its own identity and PVC, so the counter is per-pod and survives pod restarts. + +### Guarantees + +1. **Stable network identity** — pods are named `-0`, `-1`, ...; each gets DNS `-N...svc.cluster.local` that follows the pod across reschedules. +2. **Stable persistent storage** — each ordinal owns a dedicated PVC, created from `volumeClaimTemplates`. The PVC is preserved when the pod is deleted/rescheduled and even when the StatefulSet is scaled down (manual cleanup). +3. **Ordered, graceful lifecycle** — with `podManagementPolicy: OrderedReady`, pods are created and updated 0 → 1 → 2 (and torn down in reverse) so quorum/leader-election workloads stay consistent. + +### Deployment vs StatefulSet + +| | Deployment | StatefulSet | +|---|---|---| +| Pod names | `-` | `-0`, `-1`, ... 
| +| Storage | One shared PVC (or `emptyDir`) | One PVC per replica via `volumeClaimTemplates` | +| Network | Single Service VIP | Headless Service → per-pod DNS A records | +| Pod startup | Parallel | Ordered (`OrderedReady`) by default | +| Pod deletion on scale-down | Random | Reverse ordinal (N-1 → 0) | +| PVC reclaim | None (shared) | PVCs survive scale-down | +| Update strategies | RollingUpdate, Recreate | RollingUpdate (with `partition`), OnDelete | +| Best for | Stateless web/API services | Databases, queues, leader-elected clusters (Postgres, Kafka, ES, MongoDB) | + +### Headless Service + +A Service with `spec.clusterIP: None` skips kube-proxy load balancing. Instead, kube-dns publishes: +- one A record per ready pod under `<svc>.<ns>.svc.cluster.local` +- a per-pod A record `<pod>.<svc>.<ns>.svc.cluster.local` + +That lets clients (or peers in a quorum) talk directly to a specific replica — required for stateful clustering. + +--- + +## 2. Implementation + +Files: +- [`templates/statefulset.yaml`](devops-info-python/templates/statefulset.yaml) — gated by `.Values.statefulset.enabled` +- [`templates/service-headless.yaml`](devops-info-python/templates/service-headless.yaml) — `clusterIP: None` +- [`templates/deployment.yaml`](devops-info-python/templates/deployment.yaml), [`templates/rollout.yaml`](devops-info-python/templates/rollout.yaml), [`templates/pvc.yaml`](devops-info-python/templates/pvc.yaml) — auto-skip when StatefulSet is enabled +- [`values-statefulset.yaml`](devops-info-python/values-statefulset.yaml) — primary config +- [`values-statefulset-partition.yaml`](devops-info-python/values-statefulset-partition.yaml), [`values-statefulset-ondelete.yaml`](devops-info-python/values-statefulset-ondelete.yaml) — bonus + +### Key fragments + +```yaml +# statefulset.yaml +spec: + serviceName: devops-info-python-headless + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + template: + spec: + containers: + - name: devops-info-python + volumeMounts: + 
- name: data + mountPath: /data + env: + - name: VISITS_FILE + value: "/data/visits" + volumeClaimTemplates: + - metadata: { name: data } + spec: + accessModes: [ "ReadWriteOnce" ] + resources: { requests: { storage: 100Mi } } + storageClassName: standard +``` + +```yaml +# service-headless.yaml +spec: + clusterIP: None + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: devops-info-python + app.kubernetes.io/instance: devops-info-python +``` + +### Install + +```bash +kubectl create ns sts +helm install devops-info-python k8s/devops-info-python \ + -n sts -f k8s/devops-info-python/values.yaml \ + -f k8s/devops-info-python/values-statefulset.yaml +``` + +--- + +## 3. Resource Verification + +```text +$ kubectl -n sts get po,sts,svc,pvc -o wide +NAME READY STATUS RESTARTS AGE IP NODE +pod/devops-info-python-0 1/1 Running 0 49s 10.244.0.202 minikube +pod/devops-info-python-1 1/1 Running 0 41s 10.244.0.203 minikube +pod/devops-info-python-2 1/1 Running 0 33s 10.244.0.204 minikube + +NAME READY AGE IMAGES +statefulset.apps/devops-info-python 3/3 49s aezuraa/devops-info-service:python + +NAME TYPE CLUSTER-IP PORT(S) +service/devops-info-python ClusterIP 10.107.40.111 80/TCP # external access +service/devops-info-python-headless ClusterIP None 80/TCP # per-pod DNS + +NAME STATUS CAPACITY ACCESS MODES STORAGECLASS +pvc/data-devops-info-python-0 Bound 100Mi RWO standard +pvc/data-devops-info-python-1 Bound 100Mi RWO standard +pvc/data-devops-info-python-2 Bound 100Mi RWO standard +``` + +- 3 pods with **ordinal** names (no random hash). +- 3 PVCs, one per pod, named `data--`. +- Two services: one regular ClusterIP (entry point) + one **headless** for direct pod targeting. + +![Resources overview](screenshots/lab15/01-resources.png) + +--- + +## 4. 
Network Identity (DNS) + +From inside `devops-info-python-0`: + +```text +=== Hostname === +devops-info-python-0 + +=== Resolve devops-info-python-1 (peer) === +10.244.0.203 devops-info-python-1.devops-info-python-headless.sts.svc.cluster.local + +=== Resolve headless service (returns ALL pod IPs) === +('devops-info-python-headless.sts.svc.cluster.local', [], + ['10.244.0.202', '10.244.0.204', '10.244.0.203']) + +=== Resolve clusterIP service === +('devops-info-python.sts.svc.cluster.local', [], ['10.107.40.111']) +``` + +DNS naming pattern: **`<pod>.<svc>.<ns>.svc.cluster.local`** → individual pod IP. +Resolving the headless service alone returns the **list of all ready pod IPs** (one A record per pod), enabling direct client-side load balancing or quorum gossip. + +![DNS resolution from pod-0](screenshots/lab15/02-dns-resolution.png) + +--- + +## 5. Per-Pod Storage Isolation + +Each pod was hit a different number of times: + +```text +=== Hit pod-0 three times === pod-0 → visits=1, 2, 3 +=== Hit pod-1 once === pod-1 → visits=1 +=== Hit pod-2 twice === pod-2 → visits=1, 2 + +=== /visits per pod === +pod-0 visits: {"visits":3} +pod-1 visits: {"visits":1} +pod-2 visits: {"visits":2} +``` + +Different counters prove that the pods do **not** share storage — each writes to its own `data-devops-info-python-N` PVC mounted at `/data`. + +![Per-pod visits](screenshots/lab15/03-per-pod-visits.png) + +--- + +## 6. 
Persistence Across Pod Deletion + +```text +=== Before delete === +$ kubectl -n sts exec devops-info-python-0 -- cat /data/visits +3 + +=== Delete pod-0 === +$ kubectl -n sts delete pod devops-info-python-0 +pod "devops-info-python-0" deleted + +=== Pod recreated with new IP, same name + same PVC === +NAME READY STATUS AGE IP +devops-info-python-0 1/1 Running 8s 10.244.0.205 <-- was 10.244.0.202 + +=== After restart === +$ kubectl -n sts exec devops-info-python-0 -- cat /data/visits +3 + +$ curl localhost:8080/visits # via the same pod, after restart +{"visits":3} + +# Increment once more — counter continues, doesn't reset +$ curl localhost:8080/ → visits=4 + +# Other pods unaffected +pod-1: {"visits":1} +pod-2: {"visits":2} +``` + +The PVC `data-devops-info-python-0` outlived the pod. The new pod (different IP `10.244.0.205`) re-attached to the same volume and read the existing `visits=3`. + +![Persistence test](screenshots/lab15/04-persistence-test.png) + +--- + +## 7. Bonus — Update Strategies + +### 7.1 Partitioned RollingUpdate + +```yaml +updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: 2 # only pods with ordinal >= 2 are updated +``` + +```bash +helm upgrade devops-info-python k8s/devops-info-python -n sts \ + -f k8s/devops-info-python/values.yaml \ + -f k8s/devops-info-python/values-statefulset.yaml \ + -f k8s/devops-info-python/values-statefulset-partition.yaml \ + --set image.tag=go +``` + +Result — only `pod-2` adopted the new image: + +```text +=== Per-pod image (partition=2) === +pod-0: aezuraa/devops-info-service:python <-- frozen +pod-1: aezuraa/devops-info-service:python <-- frozen +pod-2: aezuraa/devops-info-service:go <-- updated + +=== updateStrategy === +{ "type": "RollingUpdate", + "rollingUpdate": { "partition": 2, "maxUnavailable": 1 } } +``` + +**Use case:** canary-style validation on the highest ordinal first; lower `partition` once metrics look good to roll the rest. 
+ +![Partition update](screenshots/lab15/05-partition.png) + +### 7.2 OnDelete + +```yaml +updateStrategy: + type: OnDelete +``` + +The controller writes the new pod template but **never** rolls existing pods automatically. They adopt the new spec only when the operator explicitly deletes them. + +```bash +helm upgrade devops-info-python ... -f values-statefulset-ondelete.yaml --set image.tag=python +# Wait — nothing changes: +pod-0: go pod-1: go pod-2: go + +kubectl -n sts delete pod devops-info-python-1 +# After recreation: +pod-0: go pod-1: python pod-2: go +``` + +**Use cases:** +- Workloads that need explicit drain / leader hand-off before restart (Postgres primary, Kafka broker). +- Strict change-control where every restart is a manual operation. +- Coordinating updates with external load-balancer cutovers. + +![OnDelete](screenshots/lab15/06-ondelete.png) + +--- + +## 8. CLI Cheatsheet + +| Action | Command | +|---|---| +| Install | `helm install devops-info-python k8s/devops-info-python -n sts -f values.yaml -f values-statefulset.yaml` | +| Watch ordered startup | `kubectl -n sts get pods -w` | +| Inspect StatefulSet | `kubectl -n sts get sts devops-info-python -o yaml` | +| List per-pod PVCs | `kubectl -n sts get pvc -l app.kubernetes.io/name=devops-info-python` | +| Resolve peer DNS | `kubectl -n sts exec sts/devops-info-python -- getent hosts ...svc.cluster.local` | +| Read data on disk | `kubectl -n sts exec devops-info-python-0 -- cat /data/visits` | +| Delete pod (PVC kept) | `kubectl -n sts delete pod devops-info-python-0` | +| Scale down (PVCs kept) | `kubectl -n sts scale sts devops-info-python --replicas=2` | +| Manual cleanup of orphaned PVCs | `kubectl -n sts delete pvc data-devops-info-python-2` | +| Set partition | `helm upgrade ... --set statefulset.updateStrategy.partition=N` | +| Switch to OnDelete | `... 
-f values-statefulset-ondelete.yaml` | diff --git a/k8s/argocd/application-dev.yaml b/k8s/argocd/application-dev.yaml new file mode 100644 index 0000000000..106568f7d4 --- /dev/null +++ b/k8s/argocd/application-dev.yaml @@ -0,0 +1,26 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: python-app-dev + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: default + source: + repoURL: https://github.com/AEZuraa/DevOps-Core-Course.git + targetRevision: lab13 + path: k8s/devops-info-python + helm: + valueFiles: + - values.yaml + - values-dev.yaml + destination: + server: https://kubernetes.default.svc + namespace: dev + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true diff --git a/k8s/argocd/application-prod.yaml b/k8s/argocd/application-prod.yaml new file mode 100644 index 0000000000..673e7cfa90 --- /dev/null +++ b/k8s/argocd/application-prod.yaml @@ -0,0 +1,23 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: python-app-prod + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: default + source: + repoURL: https://github.com/AEZuraa/DevOps-Core-Course.git + targetRevision: lab13 + path: k8s/devops-info-python + helm: + valueFiles: + - values.yaml + - values-prod.yaml + destination: + server: https://kubernetes.default.svc + namespace: prod + syncPolicy: + syncOptions: + - CreateNamespace=true diff --git a/k8s/argocd/application.yaml b/k8s/argocd/application.yaml new file mode 100644 index 0000000000..4c7f04b45f --- /dev/null +++ b/k8s/argocd/application.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: python-app + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + project: default + source: + repoURL: https://github.com/AEZuraa/DevOps-Core-Course.git + targetRevision: lab13 + path: k8s/devops-info-python + helm: + 
valueFiles: + - values.yaml + destination: + server: https://kubernetes.default.svc + namespace: default + syncPolicy: + syncOptions: + - CreateNamespace=true diff --git a/k8s/argocd/applicationset.yaml b/k8s/argocd/applicationset.yaml new file mode 100644 index 0000000000..5e85e438b6 --- /dev/null +++ b/k8s/argocd/applicationset.yaml @@ -0,0 +1,56 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: python-app-set + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: + - missingkey=error + generators: + - list: + elements: + - env: dev + namespace: appset-dev + valuesFile: values-dev.yaml + autoSync: "true" + - env: prod + namespace: appset-prod + valuesFile: values-prod.yaml + autoSync: "false" + template: + metadata: + name: 'python-appset-{{.env}}' + labels: + env: '{{.env}}' + spec: + project: default + source: + repoURL: https://github.com/AEZuraa/DevOps-Core-Course.git + targetRevision: lab13 + path: k8s/devops-info-python + helm: + valueFiles: + - values.yaml + - '{{.valuesFile}}' + parameters: + - name: service.type + value: ClusterIP + - name: service.nodePort + value: "" + destination: + server: https://kubernetes.default.svc + namespace: '{{.namespace}}' + syncPolicy: + syncOptions: + - CreateNamespace=true + templatePatch: | + {{- if eq .env "dev" }} + spec: + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + {{- end }} diff --git a/k8s/argocd/screenshots/argocd-apps-overview.png b/k8s/argocd/screenshots/argocd-apps-overview.png new file mode 100644 index 0000000000..7076747b32 Binary files /dev/null and b/k8s/argocd/screenshots/argocd-apps-overview.png differ diff --git a/k8s/argocd/screenshots/argocd-diff-view.png b/k8s/argocd/screenshots/argocd-diff-view.png new file mode 100644 index 0000000000..586f6f5919 Binary files /dev/null and b/k8s/argocd/screenshots/argocd-diff-view.png differ diff --git a/k8s/argocd/screenshots/argocd-login.png 
b/k8s/argocd/screenshots/argocd-login.png new file mode 100644 index 0000000000..de6d302558 Binary files /dev/null and b/k8s/argocd/screenshots/argocd-login.png differ diff --git a/k8s/argocd/screenshots/argocd-python-app-dev-tree.png b/k8s/argocd/screenshots/argocd-python-app-dev-tree.png new file mode 100644 index 0000000000..b2a50a670c Binary files /dev/null and b/k8s/argocd/screenshots/argocd-python-app-dev-tree.png differ diff --git a/k8s/argocd/screenshots/argocd-python-app-prod-sync.png b/k8s/argocd/screenshots/argocd-python-app-prod-sync.png new file mode 100644 index 0000000000..aeb03f8a81 Binary files /dev/null and b/k8s/argocd/screenshots/argocd-python-app-prod-sync.png differ diff --git a/k8s/argocd/screenshots/argocd-self-heal-event.png b/k8s/argocd/screenshots/argocd-self-heal-event.png new file mode 100644 index 0000000000..dbba4fe6d4 Binary files /dev/null and b/k8s/argocd/screenshots/argocd-self-heal-event.png differ diff --git a/k8s/common-lib/Chart.yaml b/k8s/common-lib/Chart.yaml new file mode 100644 index 0000000000..bbc2efaece --- /dev/null +++ b/k8s/common-lib/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: common-lib +description: Common Helm templates shared across DevOps Info services +type: library +version: 0.1.0 diff --git a/k8s/common-lib/templates/_helpers.tpl b/k8s/common-lib/templates/_helpers.tpl new file mode 100644 index 0000000000..be756c30ac --- /dev/null +++ b/k8s/common-lib/templates/_helpers.tpl @@ -0,0 +1,58 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "common.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. 
+*/}} +{{- define "common.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "common.labels" -}} +helm.sh/chart: {{ include "common.chart" . }} +{{ include "common.selectorLabels" . }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "common.selectorLabels" -}} +app.kubernetes.io/name: {{ include "common.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Common environment variables from .Values.env list. +Renders env entries from values avoiding raw toYaml for DRY reuse. 
+*/}} +{{- define "common.envVars" -}} +{{- range .Values.env }} +- name: {{ .name }} + value: {{ .value | quote }} +{{- end }} +{{- end }} diff --git a/k8s/deployment-go.yml b/k8s/deployment-go.yml new file mode 100644 index 0000000000..70d8788e80 --- /dev/null +++ b/k8s/deployment-go.yml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: devops-info-go + labels: + app: devops-info-go + version: "1.0.0" + managed-by: kubectl +spec: + replicas: 3 + selector: + matchLabels: + app: devops-info-go + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: devops-info-go + version: "1.0.0" + spec: + containers: + - name: devops-info-go + image: aezuraa/devops-info-service:go + imagePullPolicy: Never + ports: + - containerPort: 8080 + protocol: TCP + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + livenessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 3 + timeoutSeconds: 2 + failureThreshold: 3 diff --git a/k8s/deployment.yml b/k8s/deployment.yml new file mode 100644 index 0000000000..9c049d2518 --- /dev/null +++ b/k8s/deployment.yml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: devops-info-python + labels: + app: devops-info-python + version: "1.0.0" + managed-by: kubectl +spec: + replicas: 3 + selector: + matchLabels: + app: devops-info-python + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: devops-info-python + version: "1.0.0" + spec: + containers: + - name: devops-info-python + image: aezuraa/devops-info-service:python + imagePullPolicy: Never + ports: + - containerPort: 
8080 + protocol: TCP + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + - name: DEBUG + value: "False" + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" + livenessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 + timeoutSeconds: 2 + failureThreshold: 3 diff --git a/k8s/devops-info-go/Chart.lock b/k8s/devops-info-go/Chart.lock new file mode 100644 index 0000000000..a34b2a7781 --- /dev/null +++ b/k8s/devops-info-go/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common-lib + repository: file://../common-lib + version: 0.1.0 +digest: sha256:20073f8787800aa68dec8f48b8c4ee0c196f0d6ee2eba090164f5a9478995895 +generated: "2026-03-26T13:13:50.094871+03:00" diff --git a/k8s/devops-info-go/Chart.yaml b/k8s/devops-info-go/Chart.yaml new file mode 100644 index 0000000000..7db7cc2a83 --- /dev/null +++ b/k8s/devops-info-go/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v2 +name: devops-info-go +description: Helm chart for DevOps Info Go Service +type: application +version: 0.1.0 +appVersion: "1.0.0" +keywords: + - go + - devops + - web +maintainers: + - name: Egor +dependencies: + - name: common-lib + version: 0.1.0 + repository: "file://../common-lib" diff --git a/k8s/devops-info-go/charts/common-lib-0.1.0.tgz b/k8s/devops-info-go/charts/common-lib-0.1.0.tgz new file mode 100644 index 0000000000..a984b2bb37 Binary files /dev/null and b/k8s/devops-info-go/charts/common-lib-0.1.0.tgz differ diff --git a/k8s/devops-info-go/files/config.json b/k8s/devops-info-go/files/config.json new file mode 100644 index 0000000000..998b271f3c --- /dev/null +++ b/k8s/devops-info-go/files/config.json @@ -0,0 +1,13 @@ +{ + "app_name": "devops-info-go", + "environment": "dev", + "version": "1.0.0", + "features": { + "debug_mode": false, + 
"visits_tracking": true + }, + "logging": { + "level": "INFO", + "format": "text" + } +} diff --git a/k8s/devops-info-go/templates/NOTES.txt b/k8s/devops-info-go/templates/NOTES.txt new file mode 100644 index 0000000000..d7fd5460d4 --- /dev/null +++ b/k8s/devops-info-go/templates/NOTES.txt @@ -0,0 +1,16 @@ +DevOps Info Go Service has been deployed! + +Release: {{ .Release.Name }} +Chart: {{ .Chart.Name }}-{{ .Chart.Version }} +App: {{ .Chart.AppVersion }} + +{{- if eq .Values.service.type "ClusterIP" }} + +Access the application via port-forward: + kubectl port-forward svc/{{ include "common.fullname" . }} {{ .Values.service.port }}:{{ .Values.service.targetPort }} +{{- else if eq .Values.service.type "NodePort" }} + +Access the application via NodePort: + export NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}') + echo http://$NODE_IP:{{ .Values.service.nodePort }} +{{- end }} diff --git a/k8s/devops-info-go/templates/_helpers.tpl b/k8s/devops-info-go/templates/_helpers.tpl new file mode 100644 index 0000000000..c163d754b0 --- /dev/null +++ b/k8s/devops-info-go/templates/_helpers.tpl @@ -0,0 +1,28 @@ +{{/* +Override common templates with chart-specific names. +This file re-exports common-lib helpers under local names. +*/}} + +{{- define "devops-info-go.name" -}} +{{- include "common.name" . }} +{{- end }} + +{{- define "devops-info-go.fullname" -}} +{{- include "common.fullname" . }} +{{- end }} + +{{- define "devops-info-go.chart" -}} +{{- include "common.chart" . }} +{{- end }} + +{{- define "devops-info-go.labels" -}} +{{- include "common.labels" . }} +{{- end }} + +{{- define "devops-info-go.selectorLabels" -}} +{{- include "common.selectorLabels" . }} +{{- end }} + +{{- define "devops-info-go.envVars" -}} +{{- include "common.envVars" . 
}} +{{- end }} diff --git a/k8s/devops-info-go/templates/configmap.yaml b/k8s/devops-info-go/templates/configmap.yaml new file mode 100644 index 0000000000..4b25da53e9 --- /dev/null +++ b/k8s/devops-info-go/templates/configmap.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.fullname" . }}-config + labels: + {{- include "common.labels" . | nindent 4 }} +data: + config.json: |- +{{ .Files.Get "files/config.json" | indent 4 }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.fullname" . }}-env + labels: + {{- include "common.labels" . | nindent 4 }} +data: + APP_ENV: {{ .Values.config.environment | quote }} + LOG_LEVEL: {{ .Values.config.logLevel | quote }} + APP_NAME: {{ .Values.config.appName | quote }} diff --git a/k8s/devops-info-go/templates/deployment.yaml b/k8s/devops-info-go/templates/deployment.yaml new file mode 100644 index 0000000000..fcb13aa455 --- /dev/null +++ b/k8s/devops-info-go/templates/deployment.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "common.fullname" . }} + labels: + {{- include "common.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "common.selectorLabels" . | nindent 6 }} + strategy: + type: {{ .Values.strategy.type }} + {{- if eq .Values.strategy.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.strategy.rollingUpdate.maxSurge }} + maxUnavailable: {{ .Values.strategy.rollingUpdate.maxUnavailable }} + {{- end }} + template: + metadata: + labels: + {{- include "common.selectorLabels" . | nindent 8 }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} + {{- with .Values.vault }} + {{- if .enabled }} + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: {{ .role | quote }} + vault.hashicorp.com/agent-inject-secret-config: {{ .secretPath | quote }} + {{- if .template }} + vault.hashicorp.com/agent-inject-template-config: | + {{- .template | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + spec: + serviceAccountName: {{ include "common.fullname" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.containerPort }} + protocol: TCP + {{- if .Values.env }} + env: + {{- include "common.envVars" . | nindent 12 }} + {{- end }} + envFrom: + - secretRef: + name: {{ include "common.fullname" . }}-secret + - configMapRef: + name: {{ include "common.fullname" . }}-env + volumeMounts: + - name: config-volume + mountPath: /config + {{- if .Values.persistence.enabled }} + - name: data-volume + mountPath: /data + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + volumes: + - name: config-volume + configMap: + name: {{ include "common.fullname" . }}-config + {{- if .Values.persistence.enabled }} + - name: data-volume + persistentVolumeClaim: + claimName: {{ include "common.fullname" . }}-data + {{- end }} diff --git a/k8s/devops-info-go/templates/pvc.yaml b/k8s/devops-info-go/templates/pvc.yaml new file mode 100644 index 0000000000..30e4b01ad0 --- /dev/null +++ b/k8s/devops-info-go/templates/pvc.yaml @@ -0,0 +1,17 @@ +{{- if .Values.persistence.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "common.fullname" . }}-data + labels: + {{- include "common.labels" . 
| nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} +{{- end }} diff --git a/k8s/devops-info-go/templates/secrets.yaml b/k8s/devops-info-go/templates/secrets.yaml new file mode 100644 index 0000000000..c5f3e644e0 --- /dev/null +++ b/k8s/devops-info-go/templates/secrets.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.fullname" . }}-secret + labels: + {{- include "common.labels" . | nindent 4 }} +type: Opaque +stringData: + {{- range $key, $value := .Values.secrets }} + {{ $key }}: {{ $value | quote }} + {{- end }} diff --git a/k8s/devops-info-go/templates/service.yaml b/k8s/devops-info-go/templates/service.yaml new file mode 100644 index 0000000000..c7313b6f1f --- /dev/null +++ b/k8s/devops-info-go/templates/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.fullname" . }} + labels: + {{- include "common.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + selector: + {{- include "common.selectorLabels" . | nindent 4 }} + ports: + - protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + {{- if and (eq .Values.service.type "NodePort") .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} diff --git a/k8s/devops-info-go/templates/serviceaccount.yaml b/k8s/devops-info-go/templates/serviceaccount.yaml new file mode 100644 index 0000000000..41f9a58a44 --- /dev/null +++ b/k8s/devops-info-go/templates/serviceaccount.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "common.fullname" . }} + labels: + {{- include "common.labels" . 
| nindent 4 }} diff --git a/k8s/devops-info-go/values.yaml b/k8s/devops-info-go/values.yaml new file mode 100644 index 0000000000..a8689f053e --- /dev/null +++ b/k8s/devops-info-go/values.yaml @@ -0,0 +1,79 @@ +replicaCount: 3 + +image: + repository: aezuraa/devops-info-service + tag: "go" + pullPolicy: IfNotPresent + +service: + type: ClusterIP + port: 80 + targetPort: 8080 + +containerPort: 8080 + +env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + +resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + +strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + +livenessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 3 + +readinessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 3 + timeoutSeconds: 2 + failureThreshold: 3 + +secrets: + DB_USERNAME: "placeholder" + DB_PASSWORD: "placeholder" + +vault: + enabled: true + role: "devops-info" + secretPath: "secret/data/devops-info/config" + template: | + {{- with secret "secret/data/devops-info/config" -}} + DB_USERNAME={{ .Data.data.username }} + DB_PASSWORD={{ .Data.data.password }} + API_KEY={{ .Data.data.api_key }} + {{- end -}} + +config: + environment: "dev" + logLevel: "INFO" + appName: "devops-info-go" + +persistence: + enabled: true + size: 100Mi + storageClass: "" + +nameOverride: "" +fullnameOverride: "" diff --git a/k8s/devops-info-python/Chart.lock b/k8s/devops-info-python/Chart.lock new file mode 100644 index 0000000000..8ef6b25321 --- /dev/null +++ b/k8s/devops-info-python/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common-lib + repository: file://../common-lib + version: 0.1.0 +digest: sha256:20073f8787800aa68dec8f48b8c4ee0c196f0d6ee2eba090164f5a9478995895 +generated: "2026-03-26T13:13:46.076912+03:00" diff --git a/k8s/devops-info-python/Chart.yaml b/k8s/devops-info-python/Chart.yaml 
new file mode 100644 index 0000000000..d19b4111ee --- /dev/null +++ b/k8s/devops-info-python/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v2 +name: devops-info-python +description: Helm chart for DevOps Info Python Service +type: application +version: 0.1.0 +appVersion: "1.0.0" +keywords: + - python + - devops + - web +maintainers: + - name: Egor +dependencies: + - name: common-lib + version: 0.1.0 + repository: "file://../common-lib" diff --git a/k8s/devops-info-python/charts/common-lib-0.1.0.tgz b/k8s/devops-info-python/charts/common-lib-0.1.0.tgz new file mode 100644 index 0000000000..a984b2bb37 Binary files /dev/null and b/k8s/devops-info-python/charts/common-lib-0.1.0.tgz differ diff --git a/k8s/devops-info-python/files/config.json b/k8s/devops-info-python/files/config.json new file mode 100644 index 0000000000..487cbb86a7 --- /dev/null +++ b/k8s/devops-info-python/files/config.json @@ -0,0 +1,14 @@ +{ + "app_name": "devops-info-python", + "environment": "dev", + "version": "1.0.0", + "features": { + "metrics_enabled": true, + "debug_mode": false, + "visits_tracking": true + }, + "logging": { + "level": "INFO", + "format": "json" + } +} diff --git a/k8s/devops-info-python/templates/NOTES.txt b/k8s/devops-info-python/templates/NOTES.txt new file mode 100644 index 0000000000..a34c09b24d --- /dev/null +++ b/k8s/devops-info-python/templates/NOTES.txt @@ -0,0 +1,20 @@ +DevOps Info Python Service has been deployed! + +Release: {{ .Release.Name }} +Chart: {{ .Chart.Name }}-{{ .Chart.Version }} +App: {{ .Chart.AppVersion }} + +{{- if eq .Values.service.type "NodePort" }} + +Access the application via NodePort: + export NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}') + echo http://$NODE_IP:{{ .Values.service.nodePort }} +{{- else if eq .Values.service.type "LoadBalancer" }} + +Access the application via LoadBalancer: + kubectl get svc {{ include "common.fullname" . 
}} -w +{{- else }} + +Access the application via port-forward: + kubectl port-forward svc/{{ include "common.fullname" . }} {{ .Values.service.port }}:{{ .Values.service.targetPort }} +{{- end }} diff --git a/k8s/devops-info-python/templates/_helpers.tpl b/k8s/devops-info-python/templates/_helpers.tpl new file mode 100644 index 0000000000..402ea35dd9 --- /dev/null +++ b/k8s/devops-info-python/templates/_helpers.tpl @@ -0,0 +1,28 @@ +{{/* +Override common templates with chart-specific names. +This file re-exports common-lib helpers under local names. +*/}} + +{{- define "devops-info-python.name" -}} +{{- include "common.name" . }} +{{- end }} + +{{- define "devops-info-python.fullname" -}} +{{- include "common.fullname" . }} +{{- end }} + +{{- define "devops-info-python.chart" -}} +{{- include "common.chart" . }} +{{- end }} + +{{- define "devops-info-python.labels" -}} +{{- include "common.labels" . }} +{{- end }} + +{{- define "devops-info-python.selectorLabels" -}} +{{- include "common.selectorLabels" . }} +{{- end }} + +{{- define "devops-info-python.envVars" -}} +{{- include "common.envVars" . }} +{{- end }} diff --git a/k8s/devops-info-python/templates/analysistemplate.yaml b/k8s/devops-info-python/templates/analysistemplate.yaml new file mode 100644 index 0000000000..9c3412d05b --- /dev/null +++ b/k8s/devops-info-python/templates/analysistemplate.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.rollout.enabled .Values.rollout.analysis.enabled }} +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisTemplate +metadata: + name: {{ include "common.fullname" . }}-healthcheck + labels: + {{- include "common.labels" . 
| nindent 4 }} +spec: + args: + - name: service-name + metrics: + - name: health-probe + interval: {{ .Values.rollout.analysis.interval | default "10s" }} + count: {{ .Values.rollout.analysis.count | default 3 }} + failureLimit: {{ .Values.rollout.analysis.failureLimit | default 1 }} + successCondition: 'result == "healthy"' + provider: + web: + url: "http://{{ "{{args.service-name}}" }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.port }}/health" + timeoutSeconds: 5 + jsonPath: "{$.status}" +{{- end }} diff --git a/k8s/devops-info-python/templates/configmap.yaml b/k8s/devops-info-python/templates/configmap.yaml new file mode 100644 index 0000000000..4b25da53e9 --- /dev/null +++ b/k8s/devops-info-python/templates/configmap.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.fullname" . }}-config + labels: + {{- include "common.labels" . | nindent 4 }} +data: + config.json: |- +{{ .Files.Get "files/config.json" | indent 4 }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.fullname" . }}-env + labels: + {{- include "common.labels" . | nindent 4 }} +data: + APP_ENV: {{ .Values.config.environment | quote }} + LOG_LEVEL: {{ .Values.config.logLevel | quote }} + APP_NAME: {{ .Values.config.appName | quote }} diff --git a/k8s/devops-info-python/templates/deployment.yaml b/k8s/devops-info-python/templates/deployment.yaml new file mode 100644 index 0000000000..53539d3cd1 --- /dev/null +++ b/k8s/devops-info-python/templates/deployment.yaml @@ -0,0 +1,126 @@ +{{- if and (not .Values.rollout.enabled) (not .Values.statefulset.enabled) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "common.fullname" . }} + labels: + {{- include "common.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "common.selectorLabels" . 
| nindent 6 }} + strategy: + type: {{ .Values.strategy.type }} + {{- if eq .Values.strategy.type "RollingUpdate" }} + rollingUpdate: + maxSurge: {{ .Values.strategy.rollingUpdate.maxSurge }} + maxUnavailable: {{ .Values.strategy.rollingUpdate.maxUnavailable }} + {{- end }} + template: + metadata: + labels: + {{- include "common.selectorLabels" . | nindent 8 }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.vault }} + {{- if .enabled }} + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: {{ .role | quote }} + vault.hashicorp.com/agent-inject-secret-config: {{ .secretPath | quote }} + {{- if .template }} + vault.hashicorp.com/agent-inject-template-config: | + {{- .template | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + spec: + serviceAccountName: {{ include "common.fullname" . }} + {{- if .Values.initContainers.enabled }} + initContainers: + {{- if .Values.initContainers.download.enabled }} + - name: init-download + image: {{ .Values.initContainers.download.image | quote }} + command: + - sh + - -c + - | + set -eu + echo "[init-download] fetching {{ .Values.initContainers.download.url }}" + wget -O {{ .Values.initContainers.download.targetPath }} {{ .Values.initContainers.download.url }} + echo "[init-download] saved:" + ls -la {{ .Values.initContainers.download.mountPath }} + volumeMounts: + - name: work-dir + mountPath: {{ .Values.initContainers.download.mountPath }} + {{- end }} + {{- if .Values.initContainers.wait.enabled }} + - name: init-wait + image: {{ .Values.initContainers.wait.image | quote }} + command: + - sh + - -c + - | + set -eu + SVC="{{ .Values.initContainers.wait.service }}" + END=$(( $(date +%s) + {{ .Values.initContainers.wait.timeoutSeconds }} )) + echo "[init-wait] waiting for $SVC" + until nslookup "$SVC" >/dev/null 2>&1; do + if [ "$(date +%s)" -ge "$END" ]; then + echo "[init-wait] timeout waiting for $SVC" + exit 1 + fi + echo 
"[init-wait] $SVC not resolvable yet, retrying..." + sleep 2 + done + echo "[init-wait] $SVC resolved, proceeding" + {{- end }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.containerPort }} + protocol: TCP + {{- if .Values.env }} + env: + {{- include "common.envVars" . | nindent 12 }} + {{- end }} + envFrom: + - secretRef: + name: {{ include "common.fullname" . }}-secret + - configMapRef: + name: {{ include "common.fullname" . }}-env + volumeMounts: + - name: config-volume + mountPath: /config + {{- if .Values.persistence.enabled }} + - name: data-volume + mountPath: /data + {{- end }} + {{- if and .Values.initContainers.enabled .Values.initContainers.download.enabled }} + - name: work-dir + mountPath: {{ .Values.initContainers.mainMountPath }} + readOnly: true + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + volumes: + - name: config-volume + configMap: + name: {{ include "common.fullname" . }}-config + {{- if .Values.persistence.enabled }} + - name: data-volume + persistentVolumeClaim: + claimName: {{ include "common.fullname" . }}-data + {{- end }} + {{- if and .Values.initContainers.enabled .Values.initContainers.download.enabled }} + - name: work-dir + emptyDir: {} + {{- end }} +{{- end }} diff --git a/k8s/devops-info-python/templates/hooks/post-install-job.yaml b/k8s/devops-info-python/templates/hooks/post-install-job.yaml new file mode 100644 index 0000000000..f49215655b --- /dev/null +++ b/k8s/devops-info-python/templates/hooks/post-install-job.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{ include "common.fullname" . }}-post-install" + labels: + {{- include "common.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + metadata: + name: "{{ include "common.fullname" . }}-post-install" + spec: + restartPolicy: Never + containers: + - name: post-install-test + image: busybox + command: + - sh + - -c + - | + echo "=== Post-install smoke test ===" + echo "Release: {{ .Release.Name }}" + echo "Waiting for service to be ready..." + sleep 10 + echo "Smoke test completed successfully!" diff --git a/k8s/devops-info-python/templates/hooks/pre-install-job.yaml b/k8s/devops-info-python/templates/hooks/pre-install-job.yaml new file mode 100644 index 0000000000..2c643b1130 --- /dev/null +++ b/k8s/devops-info-python/templates/hooks/pre-install-job.yaml @@ -0,0 +1,29 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{ include "common.fullname" . }}-pre-install" + labels: + {{- include "common.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + metadata: + name: "{{ include "common.fullname" . }}-pre-install" + spec: + restartPolicy: Never + containers: + - name: pre-install-check + image: busybox + command: + - sh + - -c + - | + echo "=== Pre-install validation ===" + echo "Release: {{ .Release.Name }}" + echo "Chart: {{ .Chart.Name }}-{{ .Chart.Version }}" + echo "Checking environment readiness..." + sleep 5 + echo "Pre-install validation passed!" diff --git a/k8s/devops-info-python/templates/pvc.yaml b/k8s/devops-info-python/templates/pvc.yaml new file mode 100644 index 0000000000..dde2da52bf --- /dev/null +++ b/k8s/devops-info-python/templates/pvc.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.persistence.enabled (not .Values.statefulset.enabled) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "common.fullname" . }}-data + labels: + {{- include "common.labels" . 
| nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} +{{- end }} diff --git a/k8s/devops-info-python/templates/rollout.yaml b/k8s/devops-info-python/templates/rollout.yaml new file mode 100644 index 0000000000..00e1a7a098 --- /dev/null +++ b/k8s/devops-info-python/templates/rollout.yaml @@ -0,0 +1,86 @@ +{{- if and .Values.rollout.enabled (not .Values.statefulset.enabled) }} +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: {{ include "common.fullname" . }} + labels: + {{- include "common.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + revisionHistoryLimit: 3 + selector: + matchLabels: + {{- include "common.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "common.selectorLabels" . | nindent 8 }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.vault }} + {{- if .enabled }} + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: {{ .role | quote }} + vault.hashicorp.com/agent-inject-secret-config: {{ .secretPath | quote }} + {{- if .template }} + vault.hashicorp.com/agent-inject-template-config: | + {{- .template | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + spec: + serviceAccountName: {{ include "common.fullname" . }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.containerPort }} + protocol: TCP + {{- if .Values.env }} + env: + {{- include "common.envVars" . | nindent 12 }} + {{- end }} + envFrom: + - secretRef: + name: {{ include "common.fullname" . }}-secret + - configMapRef: + name: {{ include "common.fullname" . 
}}-env + volumeMounts: + - name: config-volume + mountPath: /config + {{- if .Values.persistence.enabled }} + - name: data-volume + mountPath: /data + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + volumes: + - name: config-volume + configMap: + name: {{ include "common.fullname" . }}-config + {{- if .Values.persistence.enabled }} + - name: data-volume + persistentVolumeClaim: + claimName: {{ include "common.fullname" . }}-data + {{- end }} + strategy: + {{- if eq .Values.rollout.strategy "canary" }} + canary: + steps: + {{- toYaml .Values.rollout.canary.steps | nindent 8 }} + {{- else if eq .Values.rollout.strategy "blueGreen" }} + blueGreen: + activeService: {{ include "common.fullname" . }} + previewService: {{ include "common.fullname" . }}-preview + autoPromotionEnabled: {{ .Values.rollout.blueGreen.autoPromotionEnabled }} + {{- if .Values.rollout.blueGreen.autoPromotionSeconds }} + autoPromotionSeconds: {{ .Values.rollout.blueGreen.autoPromotionSeconds }} + {{- end }} + scaleDownDelaySeconds: {{ .Values.rollout.blueGreen.scaleDownDelaySeconds | default 30 }} + {{- end }} +{{- end }} diff --git a/k8s/devops-info-python/templates/secrets.yaml b/k8s/devops-info-python/templates/secrets.yaml new file mode 100644 index 0000000000..c5f3e644e0 --- /dev/null +++ b/k8s/devops-info-python/templates/secrets.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.fullname" . }}-secret + labels: + {{- include "common.labels" . 
| nindent 4 }} +type: Opaque +stringData: + {{- range $key, $value := .Values.secrets }} + {{ $key }}: {{ $value | quote }} + {{- end }} diff --git a/k8s/devops-info-python/templates/service-headless.yaml b/k8s/devops-info-python/templates/service-headless.yaml new file mode 100644 index 0000000000..2e45f4a0f7 --- /dev/null +++ b/k8s/devops-info-python/templates/service-headless.yaml @@ -0,0 +1,18 @@ +{{- if .Values.statefulset.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.fullname" . }}-headless + labels: + {{- include "common.labels" . | nindent 4 }} +spec: + clusterIP: None + publishNotReadyAddresses: true + selector: + {{- include "common.selectorLabels" . | nindent 4 }} + ports: + - name: http + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} +{{- end }} diff --git a/k8s/devops-info-python/templates/service-preview.yaml b/k8s/devops-info-python/templates/service-preview.yaml new file mode 100644 index 0000000000..72a2195a00 --- /dev/null +++ b/k8s/devops-info-python/templates/service-preview.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rollout.enabled (eq .Values.rollout.strategy "blueGreen") }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.fullname" . }}-preview + labels: + {{- include "common.labels" . | nindent 4 }} +spec: + type: ClusterIP + selector: + {{- include "common.selectorLabels" . | nindent 4 }} + ports: + - protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} +{{- end }} diff --git a/k8s/devops-info-python/templates/service.yaml b/k8s/devops-info-python/templates/service.yaml new file mode 100644 index 0000000000..ef2538c117 --- /dev/null +++ b/k8s/devops-info-python/templates/service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.fullname" . }} + labels: + {{- include "common.labels" . 
| nindent 4 }} +spec: + type: {{ .Values.service.type }} + selector: + {{- include "common.selectorLabels" . | nindent 4 }} + ports: + - name: http + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + {{- if and (eq .Values.service.type "NodePort") .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} diff --git a/k8s/devops-info-python/templates/serviceaccount.yaml b/k8s/devops-info-python/templates/serviceaccount.yaml new file mode 100644 index 0000000000..41f9a58a44 --- /dev/null +++ b/k8s/devops-info-python/templates/serviceaccount.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "common.fullname" . }} + labels: + {{- include "common.labels" . | nindent 4 }} diff --git a/k8s/devops-info-python/templates/servicemonitor.yaml b/k8s/devops-info-python/templates/servicemonitor.yaml new file mode 100644 index 0000000000..7a84656161 --- /dev/null +++ b/k8s/devops-info-python/templates/servicemonitor.yaml @@ -0,0 +1,18 @@ +{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.fullname" . }} + labels: + {{- include "common.labels" . | nindent 4 }} + release: {{ .Values.serviceMonitor.release | quote }} +spec: + selector: + matchLabels: + {{- include "common.selectorLabels" . 
| nindent 6 }} + endpoints: + - port: {{ .Values.serviceMonitor.endpoint.port }} + path: {{ .Values.serviceMonitor.endpoint.path }} + interval: {{ .Values.serviceMonitor.endpoint.interval }} + scrapeTimeout: {{ .Values.serviceMonitor.endpoint.scrapeTimeout }} +{{- end }} diff --git a/k8s/devops-info-python/templates/statefulset.yaml b/k8s/devops-info-python/templates/statefulset.yaml new file mode 100644 index 0000000000..93d23efb10 --- /dev/null +++ b/k8s/devops-info-python/templates/statefulset.yaml @@ -0,0 +1,85 @@ +{{- if .Values.statefulset.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "common.fullname" . }} + labels: + {{- include "common.labels" . | nindent 4 }} +spec: + serviceName: {{ include "common.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + podManagementPolicy: {{ .Values.statefulset.podManagementPolicy | default "OrderedReady" }} + selector: + matchLabels: + {{- include "common.selectorLabels" . | nindent 6 }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy.type | default "RollingUpdate" }} + {{- if and (eq (.Values.statefulset.updateStrategy.type | default "RollingUpdate") "RollingUpdate") (hasKey .Values.statefulset.updateStrategy "partition") }} + rollingUpdate: + partition: {{ .Values.statefulset.updateStrategy.partition }} + {{- end }} + template: + metadata: + labels: + {{- include "common.selectorLabels" . | nindent 8 }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.vault }} + {{- if .enabled }} + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: {{ .role | quote }} + vault.hashicorp.com/agent-inject-secret-config: {{ .secretPath | quote }} + {{- if .template }} + vault.hashicorp.com/agent-inject-template-config: | + {{- .template | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + spec: + serviceAccountName: {{ include "common.fullname" . 
}} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.containerPort }} + protocol: TCP + {{- if .Values.env }} + env: + {{- include "common.envVars" . | nindent 12 }} + {{- end }} + envFrom: + - secretRef: + name: {{ include "common.fullname" . }}-secret + - configMapRef: + name: {{ include "common.fullname" . }}-env + volumeMounts: + - name: config-volume + mountPath: /config + - name: data + mountPath: /data + resources: + {{- toYaml .Values.resources | nindent 12 }} + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + volumes: + - name: config-volume + configMap: + name: {{ include "common.fullname" . }}-config + volumeClaimTemplates: + - metadata: + name: data + labels: + {{- include "common.selectorLabels" . | nindent 10 }} + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} +{{- end }} diff --git a/k8s/devops-info-python/values-bluegreen.yaml b/k8s/devops-info-python/values-bluegreen.yaml new file mode 100644 index 0000000000..00f1048fb3 --- /dev/null +++ b/k8s/devops-info-python/values-bluegreen.yaml @@ -0,0 +1,37 @@ +fullnameOverride: devops-info-python + +replicaCount: 3 + +image: + tag: "python" + +resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + +service: + type: ClusterIP + port: 80 + targetPort: 8080 + +persistence: + enabled: false + +vault: + enabled: false + +secrets: + DB_USERNAME: "admin" + DB_PASSWORD: "S3cur3P@ssw0rd" + +rollout: + enabled: true + strategy: blueGreen + blueGreen: + autoPromotionEnabled: false + autoPromotionSeconds: 0 + 
scaleDownDelaySeconds: 30 diff --git a/k8s/devops-info-python/values-canary-analysis.yaml b/k8s/devops-info-python/values-canary-analysis.yaml new file mode 100644 index 0000000000..1f06366529 --- /dev/null +++ b/k8s/devops-info-python/values-canary-analysis.yaml @@ -0,0 +1,53 @@ +fullnameOverride: devops-info-python + +replicaCount: 4 + +image: + tag: "python" + +resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + +service: + type: ClusterIP + port: 80 + targetPort: 8080 + +persistence: + enabled: false + +vault: + enabled: false + +secrets: + DB_USERNAME: "admin" + DB_PASSWORD: "S3cur3P@ssw0rd" + +rollout: + enabled: true + strategy: canary + canary: + steps: + - setWeight: 25 + - pause: { duration: 10s } + - analysis: + templates: + - templateName: devops-info-python-healthcheck + args: + - name: service-name + value: devops-info-python + - setWeight: 50 + - pause: { duration: 10s } + - setWeight: 75 + - pause: { duration: 10s } + - setWeight: 100 + analysis: + enabled: true + interval: 10s + count: 3 + failureLimit: 1 diff --git a/k8s/devops-info-python/values-canary.yaml b/k8s/devops-info-python/values-canary.yaml new file mode 100644 index 0000000000..51d649c2f7 --- /dev/null +++ b/k8s/devops-info-python/values-canary.yaml @@ -0,0 +1,44 @@ +fullnameOverride: devops-info-python + +replicaCount: 5 + +image: + tag: "python" + +resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + +service: + type: ClusterIP + port: 80 + targetPort: 8080 + +persistence: + enabled: false + +vault: + enabled: false + +secrets: + DB_USERNAME: "admin" + DB_PASSWORD: "S3cur3P@ssw0rd" + +rollout: + enabled: true + strategy: canary + canary: + steps: + - setWeight: 20 + - pause: {} + - setWeight: 40 + - pause: { duration: 30s } + - setWeight: 60 + - pause: { duration: 30s } + - setWeight: 80 + - pause: { duration: 30s } + - setWeight: 100 diff --git a/k8s/devops-info-python/values-dev.yaml 
b/k8s/devops-info-python/values-dev.yaml new file mode 100644 index 0000000000..05fa8ec39d --- /dev/null +++ b/k8s/devops-info-python/values-dev.yaml @@ -0,0 +1,37 @@ +replicaCount: 2 + +image: + tag: "python" + pullPolicy: IfNotPresent + +resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + +service: + type: NodePort + port: 80 + targetPort: 8080 + nodePort: 30081 + +livenessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 5 + +readinessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + failureThreshold: 5 diff --git a/k8s/devops-info-python/values-init.yaml b/k8s/devops-info-python/values-init.yaml new file mode 100644 index 0000000000..a5a5c86881 --- /dev/null +++ b/k8s/devops-info-python/values-init.yaml @@ -0,0 +1,52 @@ +# Lab 16 — Init containers demo (download + wait-for-service patterns). +# +# Usage: +# kubectl create namespace init-demo +# helm install init-demo k8s/devops-info-python \ +# -n init-demo \ +# -f k8s/devops-info-python/values.yaml \ +# -f k8s/devops-info-python/values-init.yaml +# +# Disables persistence/vault/rollouts/statefulset to keep the demo focused on +# the init container behavior alone. + +fullnameOverride: devops-info-python + +replicaCount: 1 + +# ClusterIP only — avoid colliding with the lab12 deployment that already +# binds nodePort 30080 in the default namespace. +service: + type: ClusterIP + port: 80 + targetPort: 8080 + +# Force a pull of busybox + main image only — no Vault sidecar, no PVC. +vault: + enabled: false +persistence: + enabled: false +rollout: + enabled: false +statefulset: + enabled: false +serviceMonitor: + enabled: false + +initContainers: + enabled: true + download: + enabled: true + image: busybox:1.36 + # https URL the init container fetches via wget into the shared emptyDir. 
+ url: "https://example.com" + targetPath: "/work-dir/index.html" + mountPath: "/work-dir" + wait: + enabled: true + image: busybox:1.36 + # Service the second init container waits for via DNS resolution. + # Resolvable from inside the pod — kube-prometheus-stack must be installed first. + service: "monitoring-grafana.monitoring.svc.cluster.local" + timeoutSeconds: 120 + mainMountPath: "/work-dir" diff --git a/k8s/devops-info-python/values-monitoring.yaml b/k8s/devops-info-python/values-monitoring.yaml new file mode 100644 index 0000000000..3a5a210909 --- /dev/null +++ b/k8s/devops-info-python/values-monitoring.yaml @@ -0,0 +1,25 @@ +# Lab 16 — App deployment with ServiceMonitor for Prometheus Operator. +# +# Usage: +# helm upgrade --install devops-info-python k8s/devops-info-python \ +# -n default \ +# -f k8s/devops-info-python/values.yaml \ +# -f k8s/devops-info-python/values-monitoring.yaml +# +# Activates the ServiceMonitor CRD so Prometheus (installed by +# kube-prometheus-stack as `monitoring`) starts scraping `/metrics`. + +fullnameOverride: devops-info-python + +# Vault is not installed in lab16 environment. 
+vault: + enabled: false + +serviceMonitor: + enabled: true + release: monitoring # must match kube-prometheus-stack release name + endpoint: + port: http # named port in service.yaml + path: /metrics + interval: 15s + scrapeTimeout: 10s diff --git a/k8s/devops-info-python/values-prod.yaml b/k8s/devops-info-python/values-prod.yaml new file mode 100644 index 0000000000..3d17ecc896 --- /dev/null +++ b/k8s/devops-info-python/values-prod.yaml @@ -0,0 +1,36 @@ +replicaCount: 5 + +image: + tag: "python" + pullPolicy: IfNotPresent + +resources: + requests: + memory: "256Mi" + cpu: "200m" + limits: + memory: "512Mi" + cpu: "500m" + +service: + type: LoadBalancer + port: 80 + targetPort: 8080 + +livenessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 3 + +readinessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 3 + timeoutSeconds: 2 + failureThreshold: 3 diff --git a/k8s/devops-info-python/values-statefulset-ondelete.yaml b/k8s/devops-info-python/values-statefulset-ondelete.yaml new file mode 100644 index 0000000000..a791e5518c --- /dev/null +++ b/k8s/devops-info-python/values-statefulset-ondelete.yaml @@ -0,0 +1,5 @@ +statefulset: + enabled: true + podManagementPolicy: OrderedReady + updateStrategy: + type: OnDelete diff --git a/k8s/devops-info-python/values-statefulset-partition.yaml b/k8s/devops-info-python/values-statefulset-partition.yaml new file mode 100644 index 0000000000..768d5e0c1b --- /dev/null +++ b/k8s/devops-info-python/values-statefulset-partition.yaml @@ -0,0 +1,6 @@ +statefulset: + enabled: true + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + partition: 2 diff --git a/k8s/devops-info-python/values-statefulset.yaml b/k8s/devops-info-python/values-statefulset.yaml new file mode 100644 index 0000000000..c7cbc0d48d --- /dev/null +++ b/k8s/devops-info-python/values-statefulset.yaml @@ -0,0 +1,51 @@ 
+fullnameOverride: devops-info-python + +replicaCount: 3 + +image: + tag: "python" + pullPolicy: IfNotPresent + +env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + - name: DEBUG + value: "False" + - name: VISITS_FILE + value: "/data/visits" + +resources: + requests: + memory: "64Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + +service: + type: ClusterIP + port: 80 + targetPort: 8080 + +persistence: + enabled: true + size: 100Mi + storageClass: standard + +vault: + enabled: false + +secrets: + DB_USERNAME: "admin" + DB_PASSWORD: "S3cur3P@ssw0rd" + +rollout: + enabled: false + +statefulset: + enabled: true + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate diff --git a/k8s/devops-info-python/values.yaml b/k8s/devops-info-python/values.yaml new file mode 100644 index 0000000000..5845c5476a --- /dev/null +++ b/k8s/devops-info-python/values.yaml @@ -0,0 +1,144 @@ +replicaCount: 3 + +image: + repository: aezuraa/devops-info-service + tag: "python" + pullPolicy: IfNotPresent + +service: + type: NodePort + port: 80 + targetPort: 8080 + nodePort: 30080 + +containerPort: 8080 + +env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + - name: DEBUG + value: "False" + +resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" + +strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + +livenessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 3 + +readinessProbe: + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 3 + timeoutSeconds: 2 + failureThreshold: 3 + +secrets: + DB_USERNAME: "placeholder" + DB_PASSWORD: "placeholder" + +vault: + enabled: true + role: "devops-info" + secretPath: "secret/data/devops-info/config" + template: | + {{- with secret "secret/data/devops-info/config" -}} + DB_USERNAME={{ .Data.data.username }} + 
DB_PASSWORD={{ .Data.data.password }} + API_KEY={{ .Data.data.api_key }} + {{- end -}} + +config: + environment: "dev" + logLevel: "INFO" + appName: "devops-info-python" + +persistence: + enabled: true + size: 100Mi + storageClass: "" + +nameOverride: "" +fullnameOverride: "" + +rollout: + enabled: false + strategy: canary + canary: + steps: + - setWeight: 20 + - pause: {} + - setWeight: 40 + - pause: { duration: 30s } + - setWeight: 60 + - pause: { duration: 30s } + - setWeight: 80 + - pause: { duration: 30s } + - setWeight: 100 + blueGreen: + autoPromotionEnabled: false + autoPromotionSeconds: 0 + scaleDownDelaySeconds: 30 + analysis: + enabled: false + interval: 10s + count: 3 + failureLimit: 1 + +statefulset: + enabled: false + podManagementPolicy: OrderedReady + updateStrategy: + type: RollingUpdate + # partition: 0 # uncomment to freeze pods with ordinal < N + +# Lab 16 — init containers demo (download + wait-for-service patterns). +# Gated so existing scenarios (rollout / statefulset / plain deployment) are unaffected. +initContainers: + enabled: false + download: + enabled: true + image: busybox:1.36 + url: "https://example.com" + targetPath: "/work-dir/index.html" + mountPath: "/work-dir" + wait: + enabled: true + image: busybox:1.36 + # Service to wait for (FQDN or short name resolvable from the pod). + service: "monitoring-grafana.monitoring.svc.cluster.local" + timeoutSeconds: 120 + # Mount path inside the main container where downloaded artifacts appear. + mainMountPath: "/work-dir" + +# Lab 16 — ServiceMonitor CRD for Prometheus Operator scraping. +# `release` label must match the kube-prometheus-stack Helm release name +# (default selector: `release: monitoring`). 
+serviceMonitor: + enabled: false + release: monitoring + endpoint: + port: http + path: /metrics + interval: 15s + scrapeTimeout: 10s diff --git a/k8s/docs/screenshots/app_accessible.png b/k8s/docs/screenshots/app_accessible.png new file mode 100644 index 0000000000..aaea539651 Binary files /dev/null and b/k8s/docs/screenshots/app_accessible.png differ diff --git a/k8s/docs/screenshots/both_apps_deployed.png b/k8s/docs/screenshots/both_apps_deployed.png new file mode 100644 index 0000000000..8aa89d2717 Binary files /dev/null and b/k8s/docs/screenshots/both_apps_deployed.png differ diff --git a/k8s/docs/screenshots/cluster_setup.png b/k8s/docs/screenshots/cluster_setup.png new file mode 100644 index 0000000000..26cb872ca5 Binary files /dev/null and b/k8s/docs/screenshots/cluster_setup.png differ diff --git a/k8s/docs/screenshots/config-json-inside-pod.png b/k8s/docs/screenshots/config-json-inside-pod.png new file mode 100644 index 0000000000..403c539093 Binary files /dev/null and b/k8s/docs/screenshots/config-json-inside-pod.png differ diff --git a/k8s/docs/screenshots/configmap-hot-reload.png b/k8s/docs/screenshots/configmap-hot-reload.png new file mode 100644 index 0000000000..17b17b0b3f Binary files /dev/null and b/k8s/docs/screenshots/configmap-hot-reload.png differ diff --git a/k8s/docs/screenshots/configmap-pvc-list.png b/k8s/docs/screenshots/configmap-pvc-list.png new file mode 100644 index 0000000000..98513ac652 Binary files /dev/null and b/k8s/docs/screenshots/configmap-pvc-list.png differ diff --git a/k8s/docs/screenshots/curl_app_response.png b/k8s/docs/screenshots/curl_app_response.png new file mode 100644 index 0000000000..dbeea62859 Binary files /dev/null and b/k8s/docs/screenshots/curl_app_response.png differ diff --git a/k8s/docs/screenshots/describe_deployment.png b/k8s/docs/screenshots/describe_deployment.png new file mode 100644 index 0000000000..8e2f2c83e2 Binary files /dev/null and b/k8s/docs/screenshots/describe_deployment.png differ diff 
--git a/k8s/docs/screenshots/dev_deployment.png b/k8s/docs/screenshots/dev_deployment.png new file mode 100644 index 0000000000..073ec4bcbf Binary files /dev/null and b/k8s/docs/screenshots/dev_deployment.png differ diff --git a/k8s/docs/screenshots/docker-visits-test.png b/k8s/docs/screenshots/docker-visits-test.png new file mode 100644 index 0000000000..8b1cfcd850 Binary files /dev/null and b/k8s/docs/screenshots/docker-visits-test.png differ diff --git a/k8s/docs/screenshots/env-vars-inside-pod.png b/k8s/docs/screenshots/env-vars-inside-pod.png new file mode 100644 index 0000000000..6962dac406 Binary files /dev/null and b/k8s/docs/screenshots/env-vars-inside-pod.png differ diff --git a/k8s/docs/screenshots/helm_dry_run.png b/k8s/docs/screenshots/helm_dry_run.png new file mode 100644 index 0000000000..5f6aae6d38 Binary files /dev/null and b/k8s/docs/screenshots/helm_dry_run.png differ diff --git a/k8s/docs/screenshots/helm_list.png b/k8s/docs/screenshots/helm_list.png new file mode 100644 index 0000000000..dfd80ec5fa Binary files /dev/null and b/k8s/docs/screenshots/helm_list.png differ diff --git a/k8s/docs/screenshots/helm_template.png b/k8s/docs/screenshots/helm_template.png new file mode 100644 index 0000000000..c81088a03d Binary files /dev/null and b/k8s/docs/screenshots/helm_template.png differ diff --git a/k8s/docs/screenshots/hooks_executed.png b/k8s/docs/screenshots/hooks_executed.png new file mode 100644 index 0000000000..8d995117cf Binary files /dev/null and b/k8s/docs/screenshots/hooks_executed.png differ diff --git a/k8s/docs/screenshots/ingress_https_app1.png b/k8s/docs/screenshots/ingress_https_app1.png new file mode 100644 index 0000000000..9aa60b7968 Binary files /dev/null and b/k8s/docs/screenshots/ingress_https_app1.png differ diff --git a/k8s/docs/screenshots/ingress_https_app2.png b/k8s/docs/screenshots/ingress_https_app2.png new file mode 100644 index 0000000000..81e8659c82 Binary files /dev/null and 
b/k8s/docs/screenshots/ingress_https_app2.png differ diff --git a/k8s/docs/screenshots/ingress_status.png b/k8s/docs/screenshots/ingress_status.png new file mode 100644 index 0000000000..ebce34d710 Binary files /dev/null and b/k8s/docs/screenshots/ingress_status.png differ diff --git a/k8s/docs/screenshots/kubectl_get_all.png b/k8s/docs/screenshots/kubectl_get_all.png new file mode 100644 index 0000000000..6fc81058bc Binary files /dev/null and b/k8s/docs/screenshots/kubectl_get_all.png differ diff --git a/k8s/docs/screenshots/kubectl_get_all_2.png b/k8s/docs/screenshots/kubectl_get_all_2.png new file mode 100644 index 0000000000..c0019b6a3a Binary files /dev/null and b/k8s/docs/screenshots/kubectl_get_all_2.png differ diff --git a/k8s/docs/screenshots/pod-delete.png b/k8s/docs/screenshots/pod-delete.png new file mode 100644 index 0000000000..58d8f294f6 Binary files /dev/null and b/k8s/docs/screenshots/pod-delete.png differ diff --git a/k8s/docs/screenshots/pods_running.png b/k8s/docs/screenshots/pods_running.png new file mode 100644 index 0000000000..e2ab001038 Binary files /dev/null and b/k8s/docs/screenshots/pods_running.png differ diff --git a/k8s/docs/screenshots/prod_deployment.png b/k8s/docs/screenshots/prod_deployment.png new file mode 100644 index 0000000000..6b575a7ba1 Binary files /dev/null and b/k8s/docs/screenshots/prod_deployment.png differ diff --git a/k8s/docs/screenshots/secret_describe_pod.png b/k8s/docs/screenshots/secret_describe_pod.png new file mode 100644 index 0000000000..7615f92958 Binary files /dev/null and b/k8s/docs/screenshots/secret_describe_pod.png differ diff --git a/k8s/docs/screenshots/vault_pods_running.png b/k8s/docs/screenshots/vault_pods_running.png new file mode 100644 index 0000000000..dfbc20af89 Binary files /dev/null and b/k8s/docs/screenshots/vault_pods_running.png differ diff --git a/k8s/docs/screenshots/vault_secret_injected.png b/k8s/docs/screenshots/vault_secret_injected.png new file mode 100644 index 
0000000000..3f8dac9025 Binary files /dev/null and b/k8s/docs/screenshots/vault_secret_injected.png differ diff --git a/k8s/docs/screenshots/visits-after-delete.png b/k8s/docs/screenshots/visits-after-delete.png new file mode 100644 index 0000000000..18718fab7a Binary files /dev/null and b/k8s/docs/screenshots/visits-after-delete.png differ diff --git a/k8s/docs/screenshots/visits-before-delete.png b/k8s/docs/screenshots/visits-before-delete.png new file mode 100644 index 0000000000..ca1dc2754a Binary files /dev/null and b/k8s/docs/screenshots/visits-before-delete.png differ diff --git a/k8s/ingress.yml b/k8s/ingress.yml new file mode 100644 index 0000000000..da2a808479 --- /dev/null +++ b/k8s/ingress.yml @@ -0,0 +1,29 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: apps-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + tls: + - hosts: + - local.example.com + secretName: tls-secret + rules: + - host: local.example.com + http: + paths: + - path: /app1 + pathType: Prefix + backend: + service: + name: devops-info-python-service + port: + number: 80 + - path: /app2 + pathType: Prefix + backend: + service: + name: devops-info-go-service + port: + number: 80 diff --git a/k8s/screenshots/lab14/01-dashboard-overview.png b/k8s/screenshots/lab14/01-dashboard-overview.png new file mode 100644 index 0000000000..43156177ac Binary files /dev/null and b/k8s/screenshots/lab14/01-dashboard-overview.png differ diff --git a/k8s/screenshots/lab14/02-canary-paused-20.png b/k8s/screenshots/lab14/02-canary-paused-20.png new file mode 100644 index 0000000000..9329492593 Binary files /dev/null and b/k8s/screenshots/lab14/02-canary-paused-20.png differ diff --git a/k8s/screenshots/lab14/03-canary-promoted.png b/k8s/screenshots/lab14/03-canary-promoted.png new file mode 100644 index 0000000000..a01aa6c470 Binary files /dev/null and b/k8s/screenshots/lab14/03-canary-promoted.png differ diff --git a/k8s/screenshots/lab14/04-canary-aborted.png 
b/k8s/screenshots/lab14/04-canary-aborted.png new file mode 100644 index 0000000000..d2d4c48be0 Binary files /dev/null and b/k8s/screenshots/lab14/04-canary-aborted.png differ diff --git a/k8s/screenshots/lab14/05-bluegreen-preview.png b/k8s/screenshots/lab14/05-bluegreen-preview.png new file mode 100644 index 0000000000..e1d9c04a3e Binary files /dev/null and b/k8s/screenshots/lab14/05-bluegreen-preview.png differ diff --git a/k8s/screenshots/lab14/06-bluegreen-promoted.png b/k8s/screenshots/lab14/06-bluegreen-promoted.png new file mode 100644 index 0000000000..1f4e045365 Binary files /dev/null and b/k8s/screenshots/lab14/06-bluegreen-promoted.png differ diff --git a/k8s/screenshots/lab14/07-bluegreen-rollback.png b/k8s/screenshots/lab14/07-bluegreen-rollback.png new file mode 100644 index 0000000000..bc117318ac Binary files /dev/null and b/k8s/screenshots/lab14/07-bluegreen-rollback.png differ diff --git a/k8s/screenshots/lab14/08-analysis-success.png b/k8s/screenshots/lab14/08-analysis-success.png new file mode 100644 index 0000000000..501ee55e4e Binary files /dev/null and b/k8s/screenshots/lab14/08-analysis-success.png differ diff --git a/k8s/screenshots/lab14/09-analysis-auto-rollback.png b/k8s/screenshots/lab14/09-analysis-auto-rollback.png new file mode 100644 index 0000000000..9ada78fd1d Binary files /dev/null and b/k8s/screenshots/lab14/09-analysis-auto-rollback.png differ diff --git a/k8s/screenshots/lab15/01-resources.png b/k8s/screenshots/lab15/01-resources.png new file mode 100644 index 0000000000..6baafe4ea2 Binary files /dev/null and b/k8s/screenshots/lab15/01-resources.png differ diff --git a/k8s/screenshots/lab15/02-dns-resolution.png b/k8s/screenshots/lab15/02-dns-resolution.png new file mode 100644 index 0000000000..6562439fa8 Binary files /dev/null and b/k8s/screenshots/lab15/02-dns-resolution.png differ diff --git a/k8s/screenshots/lab15/03-per-pod-visits.png b/k8s/screenshots/lab15/03-per-pod-visits.png new file mode 100644 index 
0000000000..199ccc10de Binary files /dev/null and b/k8s/screenshots/lab15/03-per-pod-visits.png differ diff --git a/k8s/screenshots/lab15/04-persistence-test.png b/k8s/screenshots/lab15/04-persistence-test.png new file mode 100644 index 0000000000..1022841acc Binary files /dev/null and b/k8s/screenshots/lab15/04-persistence-test.png differ diff --git a/k8s/screenshots/lab15/05-partition.png b/k8s/screenshots/lab15/05-partition.png new file mode 100644 index 0000000000..1449372bb3 Binary files /dev/null and b/k8s/screenshots/lab15/05-partition.png differ diff --git a/k8s/screenshots/lab15/06-ondelete.png b/k8s/screenshots/lab15/06-ondelete.png new file mode 100644 index 0000000000..7ea3d18ad9 Binary files /dev/null and b/k8s/screenshots/lab15/06-ondelete.png differ diff --git a/k8s/screenshots/lab16/01-stack-pods-svc.png b/k8s/screenshots/lab16/01-stack-pods-svc.png new file mode 100644 index 0000000000..156b3fdf0f Binary files /dev/null and b/k8s/screenshots/lab16/01-stack-pods-svc.png differ diff --git a/k8s/screenshots/lab16/02-q1-statefulset-pod-resources.png b/k8s/screenshots/lab16/02-q1-statefulset-pod-resources.png new file mode 100644 index 0000000000..2629979d60 Binary files /dev/null and b/k8s/screenshots/lab16/02-q1-statefulset-pod-resources.png differ diff --git a/k8s/screenshots/lab16/03-q2-namespace-cpu.png b/k8s/screenshots/lab16/03-q2-namespace-cpu.png new file mode 100644 index 0000000000..cfe7f916e3 Binary files /dev/null and b/k8s/screenshots/lab16/03-q2-namespace-cpu.png differ diff --git a/k8s/screenshots/lab16/04-q3-node-metrics.png b/k8s/screenshots/lab16/04-q3-node-metrics.png new file mode 100644 index 0000000000..a14faf0933 Binary files /dev/null and b/k8s/screenshots/lab16/04-q3-node-metrics.png differ diff --git a/k8s/screenshots/lab16/05-q4-kubelet-counts.png b/k8s/screenshots/lab16/05-q4-kubelet-counts.png new file mode 100644 index 0000000000..7f29946b00 Binary files /dev/null and b/k8s/screenshots/lab16/05-q4-kubelet-counts.png differ 
diff --git a/k8s/screenshots/lab16/06-q5-network.png b/k8s/screenshots/lab16/06-q5-network.png new file mode 100644 index 0000000000..01d1f1eefa Binary files /dev/null and b/k8s/screenshots/lab16/06-q5-network.png differ diff --git a/k8s/screenshots/lab16/07-q6-alertmanager.png b/k8s/screenshots/lab16/07-q6-alertmanager.png new file mode 100644 index 0000000000..40761f235e Binary files /dev/null and b/k8s/screenshots/lab16/07-q6-alertmanager.png differ diff --git a/k8s/screenshots/lab16/08-init-containers.png b/k8s/screenshots/lab16/08-init-containers.png new file mode 100644 index 0000000000..288944891f Binary files /dev/null and b/k8s/screenshots/lab16/08-init-containers.png differ diff --git a/k8s/screenshots/lab16/09-prom-targets.png b/k8s/screenshots/lab16/09-prom-targets.png new file mode 100644 index 0000000000..08dfc9bb67 Binary files /dev/null and b/k8s/screenshots/lab16/09-prom-targets.png differ diff --git a/k8s/screenshots/lab16/10-prom-graph.png b/k8s/screenshots/lab16/10-prom-graph.png new file mode 100644 index 0000000000..ee701af703 Binary files /dev/null and b/k8s/screenshots/lab16/10-prom-graph.png differ diff --git a/k8s/service-go.yml b/k8s/service-go.yml new file mode 100644 index 0000000000..0fb5e3737e --- /dev/null +++ b/k8s/service-go.yml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: devops-info-go-service + labels: + app: devops-info-go +spec: + type: ClusterIP + selector: + app: devops-info-go + ports: + - protocol: TCP + port: 80 + targetPort: 8080 diff --git a/k8s/service.yml b/k8s/service.yml new file mode 100644 index 0000000000..f3999eecdf --- /dev/null +++ b/k8s/service.yml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: devops-info-python-service + labels: + app: devops-info-python +spec: + type: NodePort + selector: + app: devops-info-python + ports: + - protocol: TCP + port: 80 + targetPort: 8080 + nodePort: 30080 diff --git a/lectures/lec8.md b/lectures/lec8.md index 563d095570..a4cfa59e97 
100644 --- a/lectures/lec8.md +++ b/lectures/lec8.md @@ -187,6 +187,7 @@ flowchart LR ```mermaid flowchart TD + subgraph "Pull Prometheus" P1[💾 Prometheus] -->|🔄 Scrape| T1[📦 Target] P1 -->|🔄 Scrape| T2[📦 Target] diff --git a/monitoring/.env.example b/monitoring/.env.example new file mode 100644 index 0000000000..b785a8387f --- /dev/null +++ b/monitoring/.env.example @@ -0,0 +1,2 @@ +GF_SECURITY_ADMIN_USER=admin +GF_SECURITY_ADMIN_PASSWORD= diff --git a/monitoring/docker-compose.yml b/monitoring/docker-compose.yml new file mode 100644 index 0000000000..ab0b26e1aa --- /dev/null +++ b/monitoring/docker-compose.yml @@ -0,0 +1,205 @@ +--- +services: + loki: + image: grafana/loki:3.0.0 + container_name: loki + ports: + - "3100:3100" + volumes: + - ./loki/config.yml:/etc/loki/config.yml:ro + - loki-data:/loki + command: -config.file=/etc/loki/config.yml + networks: + - logging + labels: + logging: "promtail" + app: "loki" + healthcheck: + test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3100/ready || exit 1"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 20s + deploy: + resources: + limits: + cpus: '1.0' + memory: 1G + reservations: + cpus: '0.25' + memory: 256M + restart: unless-stopped + + promtail: + image: grafana/promtail:3.0.0 + container_name: promtail + volumes: + - ./promtail/config.yml:/etc/promtail/config.yml:ro + - /var/lib/docker/containers:/var/lib/docker/containers:ro + - /var/run/docker.sock:/var/run/docker.sock:ro + command: -config.file=/etc/promtail/config.yml + networks: + - logging + labels: + logging: "promtail" + app: "promtail" + depends_on: + loki: + condition: service_healthy + deploy: + resources: + limits: + cpus: '0.5' + memory: 512M + reservations: + cpus: '0.1' + memory: 128M + restart: unless-stopped + + prometheus: + image: prom/prometheus:v3.9.0 + container_name: prometheus + ports: + - "9090:9090" + volumes: + - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro + - 
prometheus-data:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.retention.time=15d' + - '--storage.tsdb.retention.size=10GB' + networks: + - logging + labels: + logging: "promtail" + app: "prometheus" + healthcheck: + test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9090/-/healthy || exit 1"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 15s + deploy: + resources: + limits: + cpus: '1.0' + memory: 1G + reservations: + cpus: '0.25' + memory: 256M + restart: unless-stopped + + grafana: + image: grafana/grafana:12.3.1 + container_name: grafana + ports: + - "3000:3000" + volumes: + - grafana-data:/var/lib/grafana + - ./grafana/provisioning:/etc/grafana/provisioning:ro + - ./grafana/dashboards:/var/lib/grafana/dashboards:ro + environment: + - GF_AUTH_ANONYMOUS_ENABLED=false + - GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER} + - GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD} + - GF_SECURITY_ALLOW_EMBEDDING=true + networks: + - logging + labels: + logging: "promtail" + app: "grafana" + depends_on: + loki: + condition: service_healthy + prometheus: + condition: service_healthy + healthcheck: + test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/api/health || exit 1"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 15s + deploy: + resources: + limits: + cpus: '0.5' + memory: 512M + reservations: + cpus: '0.25' + memory: 128M + restart: unless-stopped + + app-python: + build: + context: ../app_python + image: aezuraa/devops-info-service:python + container_name: app-python + ports: + - "8000:8080" + environment: + - HOST=0.0.0.0 + - PORT=8080 + volumes: + - app-python-data:/data + networks: + - logging + labels: + logging: "promtail" + app: "devops-python" + healthcheck: + test: ["CMD-SHELL", "python -c \"import urllib.request; urllib.request.urlopen('http://localhost:8080/health')\" || exit 1"] + interval: 10s + timeout: 5s + retries: 5 + 
deploy: + resources: + limits: + cpus: '0.5' + memory: 256M + reservations: + cpus: '0.1' + memory: 64M + restart: unless-stopped + + app-go: + build: + context: ../app_go + image: aezuraa/devops-info-service:go + container_name: app-go + ports: + - "8001:8080" + environment: + - HOST=0.0.0.0 + - PORT=8080 + volumes: + - app-go-data:/data + networks: + - logging + labels: + logging: "promtail" + app: "devops-go" + healthcheck: + test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:8080/health || exit 1"] + interval: 10s + timeout: 5s + retries: 5 + deploy: + resources: + limits: + cpus: '0.5' + memory: 256M + reservations: + cpus: '0.1' + memory: 64M + restart: unless-stopped + +volumes: + loki-data: + grafana-data: + prometheus-data: + app-python-data: + app-go-data: + +networks: + logging: + driver: bridge diff --git a/monitoring/docs/LAB07.md b/monitoring/docs/LAB07.md new file mode 100644 index 0000000000..575dceb7d6 --- /dev/null +++ b/monitoring/docs/LAB07.md @@ -0,0 +1,509 @@ +# Lab 7 — Observability & Logging with Loki Stack + +## 1. 
Architecture + +``` +┌─────────────┐ ┌─────────────┐ +│ app-python │ │ app-go │ +│ (Flask) │ │ (net/http) │ +│ :8000 │ │ :8001 │ +└──────┬──────┘ └──────┬──────┘ + │ stdout/stderr │ stdout/stderr + ▼ ▼ +┌──────────────────────────────────┐ +│ Promtail │ +│ Docker SD → scrape containers │ +│ with label logging=promtail │ +└──────────────┬───────────────────┘ + │ /loki/api/v1/push + ▼ +┌──────────────────────────────────┐ +│ Loki 3.0 │ +│ TSDB index + filesystem store │ +│ :3100 │ +└──────────────┬───────────────────┘ + │ LogQL queries + ▼ +┌──────────────────────────────────┐ +│ Grafana 12.3 │ +│ Dashboards & Explore │ +│ :3000 │ +└──────────────────────────────────┘ +``` + +**Data flow:** Applications write logs to stdout → Docker captures them in JSON files → Promtail discovers containers via Docker socket and tails their log files → Promtail pushes log entries to Loki over HTTP → Grafana queries Loki using LogQL and renders dashboards. + +All services communicate over a shared Docker bridge network `logging`. + +--- + +## 2. 
Setup Guide + +### Prerequisites + +- Docker Engine 24+ with Compose v2 +- Ports 3000, 3100, 8000, 8001 available + +### Deployment + +```bash +cd monitoring + +# Create .env from the example template and set your password +cp .env.example .env +# Edit .env and set GF_SECURITY_ADMIN_PASSWORD + +# Start the full stack (builds apps, pulls infrastructure images) +docker compose up -d --build + +# Verify all services are running +docker compose ps +``` + +**Expected output:** + +``` +NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS +app-go aezuraa/devops-info-service:go "./devops-info-servi…" app-go About a minute ago Up About a minute 0.0.0.0:8001->8080/tcp +app-python aezuraa/devops-info-service:python "python app.py" app-python About a minute ago Up About a minute 0.0.0.0:8000->8080/tcp +grafana grafana/grafana:12.3.1 "/run.sh" grafana About a minute ago Up About a minute (healthy) 0.0.0.0:3000->3000/tcp +loki grafana/loki:3.0.0 "/usr/bin/loki -conf…" loki About a minute ago Up About a minute (healthy) 0.0.0.0:3100->3100/tcp +promtail grafana/promtail:3.0.0 "/usr/bin/promtail -…" promtail About a minute ago Up About a minute +``` + +### Service verification + +```bash +# Loki readiness +curl http://localhost:3100/ready +# → ready + +# Grafana health +curl http://localhost:3000/api/health +# → {"database":"ok","version":"12.3.1",...} + +# Python app +curl http://localhost:8000/health +# → {"status":"healthy",...} + +# Go app +curl http://localhost:8001/health +# → {"status":"healthy",...} +``` + +--- + +## 3. 
Configuration + +### 3.1 Loki Configuration (`loki/config.yml`) + +```yaml +auth_enabled: false + +server: + http_listen_port: 3100 + +common: + path_prefix: /loki + replication_factor: 1 + ring: + kvstore: + store: inmemory + +schema_config: + configs: + - from: "2024-01-01" + store: tsdb + object_store: filesystem + schema: v13 + index: + prefix: index_ + period: 24h +``` + +**Key decisions:** + +- **TSDB index** (not boltdb-shipper) — Loki 3.0 recommended, up to 10x faster queries, lower memory +- **Schema v13** — latest schema version for Loki 3.0+ +- **Filesystem storage** — suitable for single-instance deployment +- **Retention 168h (7 days)** — configured via `limits_config` with compactor enabled +- **`auth_enabled: false`** — single-tenant mode for development + +### 3.2 Promtail Configuration (`promtail/config.yml`) + +```yaml +scrape_configs: + - job_name: docker + docker_sd_configs: + - host: unix:///var/run/docker.sock + refresh_interval: 5s + filters: + - name: label + values: ["logging=promtail"] + relabel_configs: + - source_labels: ['__meta_docker_container_name'] + regex: '/(.*)' + target_label: 'container' + - source_labels: ['__meta_docker_container_label_app'] + target_label: 'app' +``` + +**Key decisions:** + +- **Docker SD** — discovers containers automatically via Docker socket +- **Label filtering** — only scrapes containers with `logging=promtail` label (opt-in model) +- **Relabeling** — extracts container name (strips leading `/`) and `app` label for LogQL querying +- **5s refresh** — balance between responsiveness and Docker API load + +### 3.3 Docker Compose + +The compose file defines 5 services on a shared `logging` network: + +| Service | Image | Port | Role | +|---------|-------|------|------| +| loki | grafana/loki:3.0.0 | 3100 | Log storage & query engine | +| promtail | grafana/promtail:3.0.0 | 9080 (internal) | Log collector | +| grafana | grafana/grafana:12.3.1 | 3000 | Visualization | +| app-python | built from `../app_python` 
| 8000→8080 | Python Flask app | +| app-go | built from `../app_go` | 8001→8080 | Go app | + +--- + +## 4. Application Logging + +Logs from both applications are visible in Grafana Explore. Python app logs are in JSON format; Go app logs are plain-text — both collected by Promtail via Docker service discovery. + +**Python app logs in Loki Explore:** + +![Python app logs in Grafana](screenshots/Loki_app_python_logs.png) + +**Go app logs in Loki Explore:** + +![Go app logs in Grafana](screenshots/Loki_app_go_logs.png) + +--- + +### JSON Structured Logging (Python) + +The Python app uses a custom `JSONFormatter` that outputs each log line as a JSON object: + +```python +class JSONFormatter(logging.Formatter): + def format(self, record): + log_data = { + 'timestamp': datetime.now(timezone.utc).isoformat(), + 'level': record.levelname, + 'logger': record.name, + 'message': record.getMessage(), + } + if hasattr(record, 'method'): + log_data['method'] = record.method + # ... additional context fields + return json.dumps(log_data) +``` + +Flask hooks log every request and response: + +- `@app.before_request` — logs incoming requests with method, path, client IP +- `@app.after_request` — logs completed requests with status code +- `@app.errorhandler(404)` — logs not-found warnings +- `@app.errorhandler(500)` — logs internal server errors + +**Example JSON log output:** + +```json +{"timestamp": "2026-03-06T12:38:42.354796+00:00", "level": "INFO", "logger": "__main__", "message": "Incoming request", "method": "GET", "path": "/", "client_ip": "192.168.65.1"} +{"timestamp": "2026-03-06T12:38:42.355255+00:00", "level": "INFO", "logger": "__main__", "message": "Request completed", "method": "GET", "path": "/", "status_code": 200, "client_ip": "192.168.65.1"} +{"timestamp": "2026-03-06T12:38:42.862278+00:00", "level": "WARNING", "logger": "__main__", "message": "Not found", "method": "GET", "path": "/nonexistent", "status_code": 404, "client_ip": "192.168.65.1"} +``` + 
+**Benefits of JSON logging:** + +- Loki can parse fields with `| json` pipeline stage +- Enables filtering by any field: `{app="devops-python"} | json | status_code=404` +- No ambiguous text parsing needed + +### Go App Logging + +The Go app uses standard `log.Printf` with plain-text format. Promtail ingests these logs and applies the container/app labels. + +--- + +## 5. Dashboard + +The Grafana dashboard "Application Logs Dashboard" contains 4 panels. Screenshot showing all 4 panels with real data: + +![Application Logs Dashboard](screenshots/All_apps_logs.png) + + +### Panel 1: Logs Table — All Apps + +- **Type:** Logs visualization +- **Query:** `{app=~"devops-.*"}` +- **Purpose:** Shows all recent log entries from both applications with timestamps and labels + +### Panel 2: Request Rate by App + +- **Type:** Time series graph +- **Query:** `sum by (app) (rate({app=~"devops-.*"} [1m]))` +- **Purpose:** Visualizes logs per second, broken down by application. Useful for spotting traffic patterns and anomalies. 
+ +### Panel 3: Error Logs + +- **Type:** Logs visualization +- **Query:** `{app=~"devops-.*"} | json | level="ERROR" or level="WARNING"` +- **Purpose:** Filters and displays only error/warning-level log entries for quick incident identification + +### Panel 4: Log Level Distribution + +- **Type:** Pie chart +- **Query:** `sum by (level) (count_over_time({app=~"devops-.*"} | json [5m]))` +- **Purpose:** Shows the proportion of log levels (INFO, WARNING, ERROR) over the last 5 minutes + +### Additional LogQL Query Examples + +```logql +# All logs from Python app +{app="devops-python"} + +# Only error logs +{app="devops-python"} |= "ERROR" + +# Parse JSON and filter by HTTP method +{app="devops-python"} | json | method="GET" + +# Filter by HTTP status code +{app="devops-python"} | json | status_code=404 + +# Filter by path +{app="devops-python"} | json | path="/health" + +# Logs per second by app +sum by (app) (rate({app=~"devops-.*"} [1m])) + +# Count logs by level in last 5 minutes +sum by (level) (count_over_time({app=~"devops-.*"} | json [5m])) + +# Regex match on container +{container=~"app-.*"} +``` + +--- + +## 6. 
Production Configuration + +### 6.1 Resource Limits + +All services have CPU/memory limits to prevent resource exhaustion: + +| Service | CPU Limit | Memory Limit | CPU Reserved | Memory Reserved | +|---------|-----------|--------------|--------------|-----------------| +| Loki | 1.0 | 1G | 0.25 | 256M | +| Promtail | 0.5 | 512M | 0.1 | 128M | +| Grafana | 0.5 | 512M | 0.25 | 128M | +| app-python | 0.5 | 256M | 0.1 | 64M | +| app-go | 0.5 | 256M | 0.1 | 64M | + +### 6.2 Security + +- **Grafana anonymous access disabled:** `GF_AUTH_ANONYMOUS_ENABLED=false` +- **Admin credentials via `.env` file:** Password not hardcoded in `docker-compose.yml` +- **`.env` not committed to git** — listed in `.gitignore`; use `.env.example` as a template +- **Ansible Vault** — in the Ansible role, `grafana_admin_password` references `vault_grafana_admin_password` from the encrypted vault +- **Docker socket mounted read-only** for Promtail: `/var/run/docker.sock:ro` +- **Config files mounted read-only:** `:ro` flag on Loki and Promtail configs + +Grafana login page (anonymous access is disabled — login required): + +![Grafana login page](screenshots/login.png) + +### 6.3 Health Checks + +- **Loki:** `wget --spider http://localhost:3100/ready` (interval: 10s, retries: 5, start_period: 20s) +- **Grafana:** `wget --spider http://localhost:3000/api/health` (interval: 10s, retries: 5, start_period: 15s) +- **Dependency chain:** Promtail and Grafana depend on `loki: service_healthy` + +### 6.4 Retention + +- Log retention: 7 days (168h) +- Compactor runs every 10 minutes to clean expired data +- Old samples rejected after 168h + +--- + +## 7.
Testing + +### Verify services + +```bash +# All services running and healthy +docker compose ps + +# Loki ready +curl http://localhost:3100/ready + +# Grafana healthy +curl http://localhost:3000/api/health + +# Loki labels populated +curl http://localhost:3100/loki/api/v1/labels +``` + +### Generate test traffic + +```bash +# Python app — normal requests +for i in {1..20}; do curl -s http://localhost:8000/; done +for i in {1..20}; do curl -s http://localhost:8000/health; done + +# Python app — 404 errors +for i in {1..5}; do curl -s http://localhost:8000/nonexistent; done + +# Go app +for i in {1..20}; do curl -s http://localhost:8001/; done +for i in {1..20}; do curl -s http://localhost:8001/health; done +``` + +### Query logs via API + +```bash +# Python app logs +curl -s 'http://localhost:3100/loki/api/v1/query?query={app="devops-python"}&limit=5' + +# Go app logs +curl -s 'http://localhost:3100/loki/api/v1/query?query={app="devops-go"}&limit=5' + +# JSON-parsed filter +curl -s 'http://localhost:3100/loki/api/v1/query?query={app="devops-python"}|json|method="GET"&limit=5' +``` + +### Verify in Grafana + +1. Open http://localhost:3000 (login: admin / from .env) +2. Go to **Explore** → select **Loki** data source +3. Run query: `{app=~"devops-.*"}` +4. Open **Dashboards** → "Application Logs Dashboard" +5. Verify all 4 panels show data + +### Tear down + +```bash +docker compose down -v +``` + +--- + +## 8. Challenges & Solutions + +### Challenge 1: Loki 3.0 TSDB Configuration + +Loki 3.0 introduced TSDB as the default index type, replacing boltdb-shipper. The configuration structure changed significantly — `tsdb_shipper` requires `active_index_directory` and `cache_location` instead of the old boltdb paths. + +**Solution:** Used the latest Loki 3.0 configuration docs with schema v13 and TSDB store type. 
+ +### Challenge 2: Promtail Docker Service Discovery Filtering + +By default, Promtail would scrape all containers including infrastructure (loki, promtail, grafana). This creates noisy self-referential logging loops. + +**Solution:** Used Docker label filtering (`logging=promtail`) in Promtail's `docker_sd_configs` so only explicitly labeled containers are scraped. + +### Challenge 3: Flask Werkzeug Default Logger + +Flask's built-in Werkzeug logger outputs its own access log lines in a non-JSON format, polluting the structured log stream. + +**Solution:** Suppressed Werkzeug's default handler and set it to WARNING level, letting our custom `@app.before_request`/`@app.after_request` hooks handle structured request logging exclusively. + +### Challenge 4: Grafana Data Source Provisioning + +Manually configuring the Loki data source in Grafana UI is not reproducible. + +**Solution:** Used Grafana's REST API (`POST /api/datasources`) to programmatically add the Loki data source. The Ansible role also automates this step. + +--- + +## Bonus: Ansible Automation + +### Role Structure + +``` +ansible/roles/monitoring/ +├── defaults/main.yml # Parameterized variables (versions, ports, limits) +├── tasks/ +│ ├── main.yml # Orchestration entry point +│ ├── setup.yml # Create dirs, template configs +│ └── deploy.yml # Docker compose deploy, health checks, datasource +├── templates/ +│ ├── docker-compose.yml.j2 +│ ├── loki-config.yml.j2 +│ └── promtail-config.yml.j2 +└── meta/main.yml # Depends on: docker role +``` + +### Key Variables + +```yaml +loki_version: "3.0.0" +promtail_version: "3.0.0" +grafana_version: "12.3.1" +loki_retention_period: "168h" +loki_schema_version: "v13" +grafana_admin_user: "admin" +grafana_admin_password: "{{ vault_grafana_admin_password }}" # stored in Ansible Vault +``` + +Sensitive values (`vault_grafana_admin_password`) are stored in the encrypted vault file `inventory/group_vars/all.yml` and never committed in plaintext. 
+ +### Playbook Usage + +```bash +# Deploy monitoring stack +ansible-playbook ansible/playbooks/deploy-monitoring.yml + +# Idempotency test — second run shows no changes +ansible-playbook ansible/playbooks/deploy-monitoring.yml +``` + +The role is idempotent: templates only trigger redeployment when config content changes, and the Grafana datasource creation accepts 409 (already exists) as success. + +### Playbook Execution Output + +First run — 5 tasks changed (dirs created, configs templated, stack deployed): + +``` +PLAY [Deploy Monitoring Stack (Loki + Promtail + Grafana)] ********************* + +TASK [monitoring : Create monitoring directories] ****************************** +changed: [lab04-vm] => (item=/opt/monitoring) +changed: [lab04-vm] => (item=/opt/monitoring/loki) +changed: [lab04-vm] => (item=/opt/monitoring/promtail) + +TASK [monitoring : Template Loki configuration] ******************************** +changed: [lab04-vm] + +TASK [monitoring : Template Promtail configuration] **************************** +changed: [lab04-vm] + +TASK [monitoring : Template Docker Compose file] ******************************* +changed: [lab04-vm] + +TASK [monitoring : Deploy monitoring stack] ************************************ +changed: [lab04-vm] + +TASK [monitoring : Wait for Loki to be ready] ********************************** +ok: [lab04-vm] # content: "ready" + +TASK [monitoring : Wait for Grafana to be ready] ******************************* +ok: [lab04-vm] # {"database":"ok","version":"12.3.1"} + +TASK [monitoring : Configure Loki data source in Grafana] ********************** +ok: [lab04-vm] # Datasource added: Loki → http://loki:3100 + +TASK [monitoring : Display deployment status] ********************************** +ok: [lab04-vm] => + "msg": "Monitoring stack deployed successfully. 
+ Grafana: http://84.201.130.19:3000 + Loki: http://84.201.130.19:3100" + +PLAY RECAP ************************************************************* +lab04-vm : ok=21 changed=5 unreachable=0 failed=0 skipped=0 +``` diff --git a/monitoring/docs/LAB08.md b/monitoring/docs/LAB08.md new file mode 100644 index 0000000000..c7def647c1 --- /dev/null +++ b/monitoring/docs/LAB08.md @@ -0,0 +1,450 @@ +# Lab 8 — Metrics & Monitoring with Prometheus + +## 1. Architecture + +``` +┌─────────────┐ +│ app-python │ +│ Flask :8080 │──── /metrics ────┐ +└──────────────┘ │ + ▼ + ┌────────────────┐ + │ Prometheus │ + │ scrape 15s │ + │ TSDB :9090 │ + └───────┬────────┘ + │ PromQL + ▼ +┌─────────┐ LogQL ┌──────────────────┐ +│ Loki │◄─────────│ Grafana │ +│ :3100 │ │ Dashboards :3000│ +└─────────┘ └──────────────────┘ +``` + +**Metric flow:** The Python app exposes a `/metrics` endpoint using `prometheus_client`. Prometheus scrapes this endpoint every 15 seconds alongside its own metrics, Loki metrics, and Grafana metrics. Grafana queries Prometheus via PromQL and renders dashboards. This complements the logging pipeline from Lab 7 (app → Promtail → Loki → Grafana). + +**Key difference — Logs vs Metrics:** + +| Aspect | Logs (Lab 7) | Metrics (Lab 8) | +|--------|-------------|-----------------| +| Data type | Text events | Numeric time-series | +| Collection | Push (Promtail → Loki) | Pull (Prometheus scrapes) | +| Query language | LogQL | PromQL | +| Best for | Debugging, audit trail | Alerting, trends, capacity | +| Cardinality | High (every event) | Low (aggregated counters) | + +--- + +## 2. 
Application Instrumentation + +### Metrics Defined + +The app implements the **RED method** (Rate, Errors, Duration): + +| Metric | Type | Labels | Purpose | +|--------|------|--------|---------| +| `http_requests_total` | Counter | `method`, `endpoint`, `status` | Total request count (Rate & Errors) | +| `http_request_duration_seconds` | Histogram | `method`, `endpoint` | Request latency distribution (Duration) | +| `http_requests_in_progress` | Gauge | — | Concurrent request count | +| `devops_info_endpoint_calls_total` | Counter | `endpoint` | Business-level endpoint usage tracking | +| `devops_info_system_collection_seconds` | Histogram | — | Time spent collecting system info | + +### Why These Metrics + +- **Counter (`http_requests_total`)** — monotonically increasing, ideal for computing `rate()` (requests/second) and filtering by status code for error rates. +- **Histogram (`http_request_duration_seconds`)** — records request duration into configurable buckets, enabling percentile calculations (p50, p95, p99) via `histogram_quantile()`. +- **Gauge (`http_requests_in_progress`)** — captures instantaneous concurrency level; useful for detecting overload. +- **Business counters** — separate from HTTP-level metrics, track application-specific behavior. + +### Implementation + +The `/metrics` endpoint is excluded from instrumentation to avoid self-referential noise. 
Flask hooks `@app.before_request` and `@app.after_request` handle metric recording transparently: + +```python +from prometheus_client import Counter, Histogram, Gauge, generate_latest + +http_requests_total = Counter( + 'http_requests_total', 'Total HTTP requests', + ['method', 'endpoint', 'status'] +) +http_request_duration_seconds = Histogram( + 'http_request_duration_seconds', 'HTTP request duration in seconds', + ['method', 'endpoint'], + buckets=[0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0] +) +http_requests_in_progress = Gauge( + 'http_requests_in_progress', 'HTTP requests currently being processed' +) +``` + +**`/metrics` endpoint output:** + +![Metrics endpoint output](screenshots/metrics_endpoint.png) + +--- + +## 3. Prometheus Configuration + +**File:** `monitoring/prometheus/prometheus.yml` + +```yaml +global: + scrape_interval: 15s + evaluation_interval: 15s + +scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'app' + static_configs: + - targets: ['app-python:8080'] + metrics_path: '/metrics' + + - job_name: 'loki' + static_configs: + - targets: ['loki:3100'] + metrics_path: '/metrics' + + - job_name: 'grafana' + static_configs: + - targets: ['grafana:3000'] + metrics_path: '/metrics' +``` + +### Scrape Targets + +| Job | Target | Port | Description | +|-----|--------|------|-------------| +| `prometheus` | `localhost:9090` | 9090 | Prometheus self-monitoring | +| `app` | `app-python:8080` | 8080 | Python Flask application | +| `loki` | `loki:3100` | 3100 | Log storage engine | +| `grafana` | `grafana:3000` | 3000 | Dashboard server | + +### Retention + +Configured via Prometheus CLI flags: + +- `--storage.tsdb.retention.time=15d` — keep data for 15 days +- `--storage.tsdb.retention.size=10GB` — cap TSDB at 10 GB on disk + +--- + +## 4. 
Dashboard Walkthrough + +The auto-provisioned dashboard **"Application Metrics Dashboard"** contains 8 panels: + +![Application Metrics Dashboard](screenshots/grafana_dashboard.png) + +### Panel 1: Service Uptime (Stat) +- **Query:** `up{job="app"}` +- **Purpose:** Shows if the app target is reachable (UP = 1, DOWN = 0) + +### Panel 2: Request Rate by Endpoint (Time Series) +- **Query:** `sum(rate(http_requests_total[5m])) by (endpoint)` +- **Purpose:** Visualizes requests/second per endpoint — the **R** in RED + +### Panel 3: Error Rate (Time Series) +- **Query:** `sum(rate(http_requests_total{status=~"5.."}[5m]))` +- **Purpose:** Shows 5xx errors/second — the **E** in RED + +### Panel 4: Request Duration p95 (Time Series) +- **Queries:** `histogram_quantile(0.95, ...)` and `histogram_quantile(0.50, ...)` +- **Purpose:** p50 and p95 latency — the **D** in RED + +### Panel 5: Request Duration Heatmap +- **Query:** `sum(increase(http_request_duration_seconds_bucket[5m])) by (le)` +- **Purpose:** Latency distribution visualization across all buckets + +### Panel 6: Active Requests (Time Series) +- **Query:** `http_requests_in_progress` +- **Purpose:** Current concurrent request count + +### Panel 7: Status Code Distribution (Pie Chart) +- **Query:** `sum by (status) (increase(http_requests_total[5m]))` +- **Purpose:** Proportion of 2xx / 4xx / 5xx responses + +### Panel 8: Endpoint Call Count (Bar Gauge) +- **Query:** `sum by (endpoint) (increase(devops_info_endpoint_calls_total[1h]))` +- **Purpose:** Business-level endpoint popularity + +--- + +## 5. PromQL Examples + +### 1. Total request rate (all endpoints combined) +```promql +sum(rate(http_requests_total[5m])) +``` +Returns aggregate requests per second across all methods, endpoints, and statuses. + +### 2. Error rate percentage +```promql +sum(rate(http_requests_total{status=~"5.."}[5m])) / sum(rate(http_requests_total[5m])) * 100 +``` +Computes the percentage of requests resulting in 5xx errors. + +### 3. 
95th percentile latency +```promql +histogram_quantile(0.95, sum(rate(http_request_duration_seconds_bucket[5m])) by (le)) +``` +Calculates the p95 request duration across all endpoints. + +### 4. Per-endpoint request rate +```promql +sum by (endpoint) (rate(http_requests_total[5m])) +``` +Breaks down request rate by endpoint to identify hot paths. + +### 5. Services down +```promql +up == 0 +``` +Returns all scrape targets that are unreachable. + +### 6. Prometheus scrape duration +```promql +scrape_duration_seconds{job="app"} +``` +Time Prometheus takes to scrape the app — useful for detecting slow `/metrics` endpoints. + +### 7. CPU usage of the app process +```promql +rate(process_cpu_seconds_total{job="app"}[5m]) * 100 +``` +App process CPU utilization percentage (exposed by `prometheus_client` default metrics). + +--- + +## 6. Production Setup + +### 6.1 Health Checks + +| Service | Check Command | Interval | Retries | +|---------|--------------|----------|---------| +| Loki | `wget ... http://localhost:3100/ready` | 10s | 5 | +| Prometheus | `wget ... http://localhost:9090/-/healthy` | 10s | 5 | +| Grafana | `wget ... http://localhost:3000/api/health` | 10s | 5 | +| app-python | `python urllib.request ... /health` | 10s | 5 | +| app-go | `wget ... 
http://localhost:8080/health` | 10s | 5 | + +### 6.2 Resource Limits + +| Service | CPU Limit | Memory Limit | CPU Reserved | Memory Reserved | +|---------|-----------|--------------|--------------|-----------------| +| Prometheus | 1.0 | 1G | 0.25 | 256M | +| Loki | 1.0 | 1G | 0.25 | 256M | +| Grafana | 0.5 | 512M | 0.25 | 128M | +| Promtail | 0.5 | 512M | 0.1 | 128M | +| app-python | 0.5 | 256M | 0.1 | 64M | +| app-go | 0.5 | 256M | 0.1 | 64M | + +### 6.3 Retention Policies + +| Component | Retention Period | Mechanism | +|-----------|-----------------|-----------| +| Prometheus | 15 days / 10 GB cap | `--storage.tsdb.retention.time`, `--storage.tsdb.retention.size` | +| Loki | 7 days (168h) | `limits_config.retention_period` + compactor | + +### 6.4 Persistent Volumes + +Three named volumes survive `docker compose down` (without `-v`): + +- `prometheus-data` → `/prometheus` (TSDB data) +- `loki-data` → `/loki` (log chunks and index) +- `grafana-data` → `/var/lib/grafana` (dashboards, users, settings) + +**Persistence test:** +1. Start stack, generate traffic, verify dashboard data +2. `docker compose down` (no `-v`) +3. `docker compose up -d` +4. Grafana dashboard and Prometheus data are retained + +### 6.5 Grafana Provisioning + +Data sources and dashboards are provisioned via config files (no manual UI setup): + +- `grafana/provisioning/datasources/datasources.yml` — Prometheus + Loki +- `grafana/provisioning/dashboards/dashboards.yml` — file-based dashboard provider +- `grafana/dashboards/app-metrics.json` — application metrics dashboard + +--- + +## 7. 
Testing Results + +### Deploy stack + +```bash +cd monitoring +docker compose up -d --build +docker compose ps +``` + +### Generate test traffic + +```bash +for i in {1..30}; do curl -s http://localhost:8000/; done +for i in {1..30}; do curl -s http://localhost:8000/health; done +for i in {1..5}; do curl -s http://localhost:8000/nonexistent; done +``` + +### Verify metrics endpoint + +```bash +curl -s http://localhost:8000/metrics | head -20 +``` + +Expected output includes: +``` +# HELP http_requests_total Total HTTP requests +# TYPE http_requests_total counter +http_requests_total{endpoint="/",method="GET",status="200"} 30.0 +http_requests_total{endpoint="/health",method="GET",status="200"} 30.0 +http_requests_total{endpoint="/nonexistent",method="GET",status="404"} 5.0 +``` + +### Verify Prometheus targets + +Open http://localhost:9090/targets — all 4 targets should show **UP** (green). + +**Prometheus targets — all UP:** + +![Prometheus targets](screenshots/prometheus_targets.png) + +**PromQL query in Prometheus UI:** + +![Prometheus query](screenshots/prometheus_query.png) + +### Verify Grafana dashboard + +Open http://localhost:3000 → Dashboards → "Application Metrics Dashboard" — all 8 panels should show data. + +**Grafana dashboard with live metrics data:** + +![Grafana application metrics dashboard](screenshots/grafana_dashboard.png) + +### Verify services healthy + +**`docker compose ps` — all services running and healthy:** + +![docker compose ps](screenshots/docker_compose_ps.png) + +### Persistence test + +After `docker compose down` and `docker compose up -d`, data and dashboards are retained: + +**Terminal — restart sequence:** + +![Persistence test — terminal](screenshots/persistence_test_1.png) + +**Grafana — dashboard intact after restart:** + +![Persistence test — Grafana](screenshots/persistence_test_2.png) + +--- + +## 8. 
Challenges & Solutions + +### Challenge 1: Excluding /metrics from instrumentation + +Scraping `/metrics` generates a request, which would increment `http_requests_total` for `/metrics` on every Prometheus scrape (every 15s), polluting actual traffic data. + +**Solution:** Added early-return guards in `before_request` and `after_request` hooks when `request.path == '/metrics'`. + +### Challenge 2: Health check in slim Python image + +`python:3.12-slim` doesn't include `curl` or `wget`. Using `apt-get install curl` would bloat the image. + +**Solution:** Used Python's built-in `urllib.request` for the health check command. + +### Challenge 3: Grafana datasource provisioning UIDs + +Hardcoding datasource UIDs in dashboard JSON breaks when Grafana assigns different UIDs to provisioned datasources. + +**Solution:** Used datasource name string (`"Prometheus"`) in dashboard panel configs instead of UID references. + +### Challenge 4: Histogram bucket selection + +Default Prometheus histogram buckets are too coarse for a fast API (most requests < 10ms). + +**Solution:** Configured custom buckets `[0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0]` to capture sub-10ms latencies accurately. 
+ +--- + +## Bonus: Ansible Automation + +### Extended Role Structure + +``` +ansible/roles/monitoring/ +├── defaults/main.yml # All variables (versions, ports, targets, limits) +├── files/ +│ └── grafana-app-dashboard.json # Dashboard JSON +├── tasks/ +│ ├── main.yml # Orchestration entry point +│ ├── setup.yml # Dirs, template configs, copy dashboards +│ └── deploy.yml # Docker compose, health checks +├── templates/ +│ ├── docker-compose.yml.j2 # Full stack (Loki + Promtail + Prometheus + Grafana) +│ ├── loki-config.yml.j2 +│ ├── promtail-config.yml.j2 +│ ├── prometheus.yml.j2 # Templated from prometheus_targets variable +│ ├── grafana-datasources.yml.j2 # Prometheus + Loki datasources +│ └── grafana-dashboards-provisioning.yml.j2 # Dashboard file provider +└── meta/main.yml # Depends on: docker role +``` + +### Key Variables Added (Lab 8) + +```yaml +prometheus_version: "3.9.0" +prometheus_port: 9090 +prometheus_retention_days: 15 +prometheus_retention_size: "10GB" +prometheus_scrape_interval: "15s" + +prometheus_targets: + - job: "prometheus" + targets: ["localhost:9090"] + - job: "app" + targets: ["app-python:8080"] + path: "/metrics" + - job: "loki" + targets: ["loki:3100"] + path: "/metrics" + - job: "grafana" + targets: ["grafana:3000"] + path: "/metrics" +``` + +### Prometheus Config Template + +`prometheus.yml.j2` generates scrape config from the `prometheus_targets` list: + +```yaml +global: + scrape_interval: {{ prometheus_scrape_interval }} + +scrape_configs: +{% for target in prometheus_targets %} + - job_name: '{{ target.job }}' + static_configs: + - targets: {{ target.targets | to_json }} +{% if target.path is defined %} + metrics_path: '{{ target.path }}' +{% endif %} +{% endfor %} +``` + +### Single-Command Deployment + +```bash +ansible-playbook ansible/playbooks/deploy-monitoring.yml +``` + +Deploys the entire observability stack: +- Loki + Promtail (logging from Lab 7) +- Prometheus (metrics from Lab 8) +- Grafana with auto-provisioned 
datasources (Loki + Prometheus) and dashboards +- All configs, health checks, resource limits, and retention policies diff --git a/monitoring/docs/screenshots/All_apps_logs.png b/monitoring/docs/screenshots/All_apps_logs.png new file mode 100644 index 0000000000..6505e00adc Binary files /dev/null and b/monitoring/docs/screenshots/All_apps_logs.png differ diff --git a/monitoring/docs/screenshots/Loki_app_go_logs.png b/monitoring/docs/screenshots/Loki_app_go_logs.png new file mode 100644 index 0000000000..363ab28d4e Binary files /dev/null and b/monitoring/docs/screenshots/Loki_app_go_logs.png differ diff --git a/monitoring/docs/screenshots/Loki_app_python_logs.png b/monitoring/docs/screenshots/Loki_app_python_logs.png new file mode 100644 index 0000000000..f8e240417e Binary files /dev/null and b/monitoring/docs/screenshots/Loki_app_python_logs.png differ diff --git a/monitoring/docs/screenshots/docker_compose_ps.png b/monitoring/docs/screenshots/docker_compose_ps.png new file mode 100644 index 0000000000..f31c5a5b03 Binary files /dev/null and b/monitoring/docs/screenshots/docker_compose_ps.png differ diff --git a/monitoring/docs/screenshots/grafana_dashboard.png b/monitoring/docs/screenshots/grafana_dashboard.png new file mode 100644 index 0000000000..007c80503e Binary files /dev/null and b/monitoring/docs/screenshots/grafana_dashboard.png differ diff --git a/monitoring/docs/screenshots/login.png b/monitoring/docs/screenshots/login.png new file mode 100644 index 0000000000..d2a43bf4e3 Binary files /dev/null and b/monitoring/docs/screenshots/login.png differ diff --git a/monitoring/docs/screenshots/metrics_endpoint.png b/monitoring/docs/screenshots/metrics_endpoint.png new file mode 100644 index 0000000000..24947c49ca Binary files /dev/null and b/monitoring/docs/screenshots/metrics_endpoint.png differ diff --git a/monitoring/docs/screenshots/persistence_test_1.png b/monitoring/docs/screenshots/persistence_test_1.png new file mode 100644 index 0000000000..a77a408ddd 
Binary files /dev/null and b/monitoring/docs/screenshots/persistence_test_1.png differ diff --git a/monitoring/docs/screenshots/persistence_test_2.png b/monitoring/docs/screenshots/persistence_test_2.png new file mode 100644 index 0000000000..a2a49ed648 Binary files /dev/null and b/monitoring/docs/screenshots/persistence_test_2.png differ diff --git a/monitoring/docs/screenshots/prometheus_query.png b/monitoring/docs/screenshots/prometheus_query.png new file mode 100644 index 0000000000..eeaec7dcaf Binary files /dev/null and b/monitoring/docs/screenshots/prometheus_query.png differ diff --git a/monitoring/docs/screenshots/prometheus_targets.png b/monitoring/docs/screenshots/prometheus_targets.png new file mode 100644 index 0000000000..8278bc88a4 Binary files /dev/null and b/monitoring/docs/screenshots/prometheus_targets.png differ diff --git a/monitoring/grafana/dashboards/app-metrics.json b/monitoring/grafana/dashboards/app-metrics.json new file mode 100644 index 0000000000..6ace8e1836 --- /dev/null +++ b/monitoring/grafana/dashboards/app-metrics.json @@ -0,0 +1,319 @@ +{ + "annotations": { + "list": [] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": null, + "links": [], + "panels": [ + { + "title": "Service Uptime", + "type": "stat", + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 0 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "mappings": [ + { + "options": { + "0": { + "color": "red", + "text": "DOWN" + }, + "1": { + "color": "green", + "text": "UP" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "options": { + "colorMode": "background", + "graphMode": "none", + "textMode": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ] + } + }, + "targets": [ + { + "expr": "up{job=\"app\"}", + "legendFormat": "app-python", + "refId": "A" 
+ } + ] + }, + { + "title": "Request Rate by Endpoint", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 12, + "x": 6, + "y": 0 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "reqps", + "custom": { + "lineWidth": 2, + "fillOpacity": 15, + "showPoints": "never" + } + }, + "overrides": [] + }, + "targets": [ + { + "expr": "sum(rate(http_requests_total[5m])) by (endpoint)", + "legendFormat": "{{endpoint}}", + "refId": "A" + } + ] + }, + { + "title": "Error Rate (5xx)", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 0 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "reqps", + "color": { + "mode": "fixed", + "fixedColor": "red" + }, + "custom": { + "lineWidth": 2, + "fillOpacity": 30, + "showPoints": "never" + } + }, + "overrides": [] + }, + "targets": [ + { + "expr": "sum(rate(http_requests_total{status=~\"5..\"}[5m]))", + "legendFormat": "5xx errors/s", + "refId": "A" + } + ] + }, + { + "title": "Request Duration p95", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "s", + "custom": { + "lineWidth": 2, + "fillOpacity": 10, + "showPoints": "never" + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 0.5 + }, + { + "color": "red", + "value": 1.0 + } + ] + } + }, + "overrides": [] + }, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(http_request_duration_seconds_bucket[5m])) by (le))", + "legendFormat": "p95 latency", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(http_request_duration_seconds_bucket[5m])) by (le))", + "legendFormat": "p50 latency", + "refId": "B" + } + ] + }, + { + "title": "Request Duration Heatmap", + "type": "heatmap", + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "datasource": "Prometheus", + 
"options": { + "calculate": false, + "yAxis": { + "unit": "s" + }, + "color": { + "scheme": "Oranges" + } + }, + "targets": [ + { + "expr": "sum(increase(http_request_duration_seconds_bucket[5m])) by (le)", + "legendFormat": "{{le}}", + "refId": "A", + "format": "heatmap" + } + ] + }, + { + "title": "Active Requests", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 16 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "short", + "color": { + "mode": "fixed", + "fixedColor": "blue" + }, + "custom": { + "lineWidth": 2, + "fillOpacity": 20, + "showPoints": "never" + } + }, + "overrides": [] + }, + "targets": [ + { + "expr": "http_requests_in_progress", + "legendFormat": "in-progress", + "refId": "A" + } + ] + }, + { + "title": "Status Code Distribution", + "type": "piechart", + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 16 + }, + "datasource": "Prometheus", + "options": { + "legend": { + "displayMode": "table", + "placement": "right" + }, + "pieType": "donut" + }, + "targets": [ + { + "expr": "sum by (status) (increase(http_requests_total[5m]))", + "legendFormat": "{{status}}", + "refId": "A" + } + ] + }, + { + "title": "Endpoint Call Count", + "type": "bargauge", + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 16 + }, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "options": { + "orientation": "horizontal", + "displayMode": "gradient" + }, + "targets": [ + { + "expr": "sum by (endpoint) (increase(devops_info_endpoint_calls_total[1h]))", + "legendFormat": "{{endpoint}}", + "refId": "A" + } + ] + } + ], + "schemaVersion": 39, + "tags": [ + "application", + "metrics", + "RED" + ], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Application Metrics Dashboard", + "uid": "app-metrics-dashboard", + "version": 1, + "refresh": "10s" +} \ No newline 
at end of file diff --git a/monitoring/grafana/provisioning/dashboards/dashboards.yml b/monitoring/grafana/provisioning/dashboards/dashboards.yml new file mode 100644 index 0000000000..eadf91635c --- /dev/null +++ b/monitoring/grafana/provisioning/dashboards/dashboards.yml @@ -0,0 +1,12 @@ +apiVersion: 1 + +providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards + foldersFromFilesStructure: false diff --git a/monitoring/grafana/provisioning/datasources/datasources.yml b/monitoring/grafana/provisioning/datasources/datasources.yml new file mode 100644 index 0000000000..2c0808d808 --- /dev/null +++ b/monitoring/grafana/provisioning/datasources/datasources.yml @@ -0,0 +1,15 @@ +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus:9090 + isDefault: true + editable: false + + - name: Loki + type: loki + access: proxy + url: http://loki:3100 + editable: false diff --git a/monitoring/loki/config.yml b/monitoring/loki/config.yml new file mode 100644 index 0000000000..47bdd8ad74 --- /dev/null +++ b/monitoring/loki/config.yml @@ -0,0 +1,46 @@ +--- +auth_enabled: false + +server: + http_listen_port: 3100 + +common: + path_prefix: /loki + replication_factor: 1 + ring: + kvstore: + store: inmemory + +schema_config: + configs: + - from: "2024-01-01" + store: tsdb + object_store: filesystem + schema: v13 + index: + prefix: index_ + period: 24h + +storage_config: + tsdb_shipper: + active_index_directory: /loki/tsdb-index + cache_location: /loki/tsdb-cache + filesystem: + directory: /loki/chunks + +limits_config: + retention_period: 168h + reject_old_samples: true + reject_old_samples_max_age: 168h + max_query_series: 500 + max_query_parallelism: 2 + +compactor: + working_directory: /loki/compactor + compaction_interval: 10m + retention_enabled: true + retention_delete_delay: 2h + delete_request_store: filesystem + +analytics: 
+ reporting_enabled: false diff --git a/monitoring/prometheus/prometheus.yml b/monitoring/prometheus/prometheus.yml new file mode 100644 index 0000000000..ec55159ebc --- /dev/null +++ b/monitoring/prometheus/prometheus.yml @@ -0,0 +1,23 @@ +global: + scrape_interval: 15s + evaluation_interval: 15s + +scrape_configs: + - job_name: 'prometheus' + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'app' + static_configs: + - targets: ['app-python:8080'] + metrics_path: '/metrics' + + - job_name: 'loki' + static_configs: + - targets: ['loki:3100'] + metrics_path: '/metrics' + + - job_name: 'grafana' + static_configs: + - targets: ['grafana:3000'] + metrics_path: '/metrics' diff --git a/monitoring/promtail/config.yml b/monitoring/promtail/config.yml new file mode 100644 index 0000000000..1bf86411db --- /dev/null +++ b/monitoring/promtail/config.yml @@ -0,0 +1,27 @@ +--- +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://loki:3100/loki/api/v1/push + +scrape_configs: + - job_name: docker + docker_sd_configs: + - host: unix:///var/run/docker.sock + refresh_interval: 5s + filters: + - name: label + values: ["logging=promtail"] + relabel_configs: + - source_labels: ['__meta_docker_container_name'] + regex: '/(.*)' + target_label: 'container' + - source_labels: ['__meta_docker_container_label_app'] + target_label: 'app' + - source_labels: ['__meta_docker_container_label_app'] + target_label: 'job' diff --git a/pulumi/.gitignore b/pulumi/.gitignore new file mode 100644 index 0000000000..02d602e26b --- /dev/null +++ b/pulumi/.gitignore @@ -0,0 +1,10 @@ +# Virtual environment +venv/ + +# Python cache +__pycache__/ +*.pyc +*.pyo + +# Stack configs (may contain secrets) +Pulumi.*.yaml diff --git a/pulumi/Pulumi.yaml b/pulumi/Pulumi.yaml new file mode 100644 index 0000000000..51e71ff5a0 --- /dev/null +++ b/pulumi/Pulumi.yaml @@ -0,0 +1,6 @@ +name: lab04-infra +runtime: + name: python + 
options: + virtualenv: venv +description: "Lab 04 — Yandex Cloud VM with Pulumi (Python)" diff --git a/pulumi/README.md b/pulumi/README.md new file mode 100644 index 0000000000..29ed2e2193 --- /dev/null +++ b/pulumi/README.md @@ -0,0 +1,39 @@ +# Pulumi — Yandex Cloud VM (Python) + +Recreates the same infrastructure as Terraform using Pulumi with Python. + +## Prerequisites + +- Pulumi CLI >= 3.x +- Python >= 3.9 +- Yandex Cloud account with configured CLI + +## Usage + +```bash +python -m venv venv +source venv/bin/activate +pip install -r requirements.txt + +pulumi stack init dev +pulumi config set yandex:token YOUR_TOKEN --secret +pulumi config set yandex:cloudId YOUR_CLOUD_ID +pulumi config set yandex:folderId YOUR_FOLDER_ID +pulumi config set sshPublicKey "ssh-rsa AAAA..." + +pulumi preview +pulumi up +``` + +## Connect to VM + +```bash +pulumi stack output ssh_command +``` + +## Cleanup + +```bash +pulumi destroy +pulumi stack rm dev +``` diff --git a/pulumi/__main__.py b/pulumi/__main__.py new file mode 100644 index 0000000000..95baada4df --- /dev/null +++ b/pulumi/__main__.py @@ -0,0 +1,108 @@ +"""Lab 04 — Yandex Cloud VM with Pulumi (Python).""" + +import pulumi +import pulumi_yandex as yandex + +config = pulumi.Config() +zone = config.get("zone") or "ru-central1-a" +vm_user = config.get("vmUser") or "ubuntu" +ssh_public_key = config.require("sshPublicKey") + +# --- Network --- + +network = yandex.VpcNetwork( + "lab04-network", + name="lab04-network", + labels={"project": "devops-lab04"}, +) + +subnet = yandex.VpcSubnet( + "lab04-subnet", + name="lab04-subnet", + zone=zone, + network_id=network.id, + v4_cidr_blocks=["10.0.1.0/24"], + labels={"project": "devops-lab04"}, +) + +# --- Security Group --- + +security_group = yandex.VpcSecurityGroup( + "lab04-sg", + name="lab04-sg", + network_id=network.id, + ingresses=[ + yandex.VpcSecurityGroupIngressArgs( + description="SSH", + protocol="TCP", + port=22, + v4_cidr_blocks=["0.0.0.0/0"], + ), + 
yandex.VpcSecurityGroupIngressArgs( + description="HTTP", + protocol="TCP", + port=80, + v4_cidr_blocks=["0.0.0.0/0"], + ), + yandex.VpcSecurityGroupIngressArgs( + description="App port", + protocol="TCP", + port=5000, + v4_cidr_blocks=["0.0.0.0/0"], + ), + ], + egresses=[ + yandex.VpcSecurityGroupEgressArgs( + description="Allow all outbound", + protocol="ANY", + v4_cidr_blocks=["0.0.0.0/0"], + ), + ], + labels={"project": "devops-lab04"}, +) + +# --- Compute Instance --- + +image = yandex.get_compute_image(family="ubuntu-2404-lts") + +instance = yandex.ComputeInstance( + "lab04-vm", + name="lab04-vm", + platform_id="standard-v2", + zone=zone, + resources=yandex.ComputeInstanceResourcesArgs( + cores=2, + memory=1, + core_fraction=20, + ), + boot_disk=yandex.ComputeInstanceBootDiskArgs( + initialize_params=yandex.ComputeInstanceBootDiskInitializeParamsArgs( + image_id=image.id, + size=10, + type="network-hdd", + ), + ), + network_interfaces=[ + yandex.ComputeInstanceNetworkInterfaceArgs( + subnet_id=subnet.id, + nat=True, + security_group_ids=[security_group.id], + ), + ], + metadata={ + "ssh-keys": f"{vm_user}:{ssh_public_key}", + }, + labels={ + "project": "devops-lab04", + "env": "dev", + }, +) + +# --- Outputs --- + +pulumi.export("vm_public_ip", instance.network_interfaces[0].nat_ip_address) +pulumi.export("vm_name", instance.name) +pulumi.export( + "ssh_command", + pulumi.Output.concat("ssh ", vm_user, "@", instance.network_interfaces[0].nat_ip_address), +) diff --git a/pulumi/requirements.txt b/pulumi/requirements.txt new file mode 100644 index 0000000000..2e2d977f6b --- /dev/null +++ b/pulumi/requirements.txt @@ -0,0 +1,3 @@ +pulumi>=3.0.0,<4.0.0 +pulumi-yandex>=0.13.0 +setuptools<70 diff --git a/screenshots/lab17/01-dashboard-worker.png b/screenshots/lab17/01-dashboard-worker.png new file mode 100644 index 0000000000..69fc101765 Binary files /dev/null and b/screenshots/lab17/01-dashboard-worker.png differ diff --git 
a/screenshots/lab17/02-public-url-curl.png b/screenshots/lab17/02-public-url-curl.png new file mode 100644 index 0000000000..adc14e71a4 Binary files /dev/null and b/screenshots/lab17/02-public-url-curl.png differ diff --git a/screenshots/lab17/03-kv-namespace.png b/screenshots/lab17/03-kv-namespace.png new file mode 100644 index 0000000000..2395fa1a6e Binary files /dev/null and b/screenshots/lab17/03-kv-namespace.png differ diff --git a/screenshots/lab17/04-secrets.png b/screenshots/lab17/04-secrets.png new file mode 100644 index 0000000000..5c26868508 Binary files /dev/null and b/screenshots/lab17/04-secrets.png differ diff --git a/screenshots/lab17/05-wrangler-tail.png b/screenshots/lab17/05-wrangler-tail.png new file mode 100644 index 0000000000..eafa613483 Binary files /dev/null and b/screenshots/lab17/05-wrangler-tail.png differ diff --git a/screenshots/lab17/06-dashboard-logs.png b/screenshots/lab17/06-dashboard-logs.png new file mode 100644 index 0000000000..dfaffe19b9 Binary files /dev/null and b/screenshots/lab17/06-dashboard-logs.png differ diff --git a/screenshots/lab17/07-dashboard-metrics.png b/screenshots/lab17/07-dashboard-metrics.png new file mode 100644 index 0000000000..be78598bd3 Binary files /dev/null and b/screenshots/lab17/07-dashboard-metrics.png differ diff --git a/screenshots/lab17/08-deployments-history.png b/screenshots/lab17/08-deployments-history.png new file mode 100644 index 0000000000..1b17172729 Binary files /dev/null and b/screenshots/lab17/08-deployments-history.png differ diff --git a/terraform/.gitignore b/terraform/.gitignore new file mode 100644 index 0000000000..75a299d0b9 --- /dev/null +++ b/terraform/.gitignore @@ -0,0 +1,24 @@ +# Terraform state +*.tfstate +*.tfstate.* +.terraform/ +.terraform.lock.hcl + +# Variable values (contain secrets) +terraform.tfvars +*.tfvars +!*.tfvars.example + +# Crash logs +crash.log +crash.*.log + +# Override files +override.tf +override.tf.json +*_override.tf +*_override.tf.json + +# 
Credentials +*.pem +*.key diff --git a/terraform/.tflint.hcl b/terraform/.tflint.hcl new file mode 100644 index 0000000000..427121c3ef --- /dev/null +++ b/terraform/.tflint.hcl @@ -0,0 +1,4 @@ +plugin "terraform" { + enabled = true + preset = "recommended" +} diff --git a/terraform/README.md b/terraform/README.md new file mode 100644 index 0000000000..b0261aa63e --- /dev/null +++ b/terraform/README.md @@ -0,0 +1,41 @@ +# Terraform — Yandex Cloud VM + +Creates a VM with network, subnet, security group, and public IP on Yandex Cloud. + +## Prerequisites + +- Terraform >= 1.9.0 +- Yandex Cloud account with configured CLI (`yc init`) +- SSH key pair (`ssh-keygen -t rsa -b 4096`) + +## Usage + +```bash +cp terraform.tfvars.example terraform.tfvars +# Edit terraform.tfvars with your values + +terraform init +terraform plan +terraform apply +``` + +## Connect to VM + +```bash +terraform output ssh_command +``` + +## Resources + +| Resource | Type | Details | +|----------|------|---------| +| VPC Network | `yandex_vpc_network` | lab04-network | +| Subnet | `yandex_vpc_subnet` | 10.0.1.0/24, ru-central1-a | +| Security Group | `yandex_vpc_security_group` | SSH(22), HTTP(80), App(5000) | +| VM | `yandex_compute_instance` | 2 vCPU (20%), 1 GB RAM, Ubuntu 24.04, 10 GB HDD | + +## Cleanup + +```bash +terraform destroy +``` diff --git a/terraform/github/.gitignore b/terraform/github/.gitignore new file mode 100644 index 0000000000..2772eace99 --- /dev/null +++ b/terraform/github/.gitignore @@ -0,0 +1,9 @@ +*.tfstate +*.tfstate.* +.terraform/ +.terraform.lock.hcl +terraform.tfvars +*.tfvars +!*.tfvars.example +crash.log +crash.*.log diff --git a/terraform/github/README.md b/terraform/github/README.md new file mode 100644 index 0000000000..9f69d3098d --- /dev/null +++ b/terraform/github/README.md @@ -0,0 +1,71 @@ +# GitHub Repository Management with Terraform + +This Terraform configuration manages the DevOps-Core-Course GitHub repository settings through Infrastructure as Code. 
+ +## Purpose + +Demonstrates **terraform import** — bringing existing manually-created infrastructure under Terraform management (brownfield IaC adoption). + +## What's Managed + +- Repository metadata (name, description, visibility) +- Feature flags (issues, wiki, projects) +- Merge settings (merge commit, squash, rebase) +- Branch protection (optional, can be added) + +## Setup + +1. **Create GitHub Personal Access Token:** + - Go to: Settings → Developer settings → Personal access tokens → Tokens (classic) + - Generate new token with `repo` scope (full control of private repositories) + - Copy token (shown once!) + +2. **Configure authentication:** + ```bash + export GITHUB_TOKEN="your-token-here" + export TF_VAR_github_owner="your-github-username" + ``` + +3. **Initialize Terraform:** + ```bash + terraform init + ``` + +## Usage + +### View current state +```bash +terraform plan +``` + +### Apply changes +```bash +terraform apply +``` + +### Import existing repository (already done) +```bash +terraform import github_repository.course_repo DevOps-Core-Course +``` + +## Import Process Notes + +The repository was imported with: +```bash +terraform import github_repository.course_repo DevOps-Core-Course +``` + +After import, configuration was updated to match actual GitHub state to prevent unwanted changes. 
+ +## Security + +- Never commit `terraform.tfvars` with tokens +- Use environment variables or encrypted secrets +- `.gitignore` excludes sensitive files +- State file is gitignored (contains repo metadata) + +## Resources + +- [GitHub Provider Docs](https://registry.terraform.io/providers/integrations/github/latest/docs) +- [Repository Resource](https://registry.terraform.io/providers/integrations/github/latest/docs/resources/repository) +- [Import Guide](https://developer.hashicorp.com/terraform/cli/import) diff --git a/terraform/github/main.tf b/terraform/github/main.tf new file mode 100644 index 0000000000..ee1efeb1cc --- /dev/null +++ b/terraform/github/main.tf @@ -0,0 +1,31 @@ +terraform { + required_version = ">= 1.5.0" + + required_providers { + github = { + source = "integrations/github" + version = "~> 6.0" + } + } +} + +provider "github" { + token = var.github_token + owner = var.github_owner +} + +resource "github_repository" "course_repo" { + name = "DevOps-Core-Course" + description = "🚀Production-grade DevOps course: 18 hands-on labs covering Docker, Kubernetes, Helm, Terraform, Ansible, CI/CD, GitOps (ArgoCD), monitoring (Prometheus/Grafana), and more. Build real-world skills with progressive delivery, secrets management, and cloud-native deployments." 
+ visibility = "public" + + has_issues = false + has_wiki = true + has_projects = true + + allow_merge_commit = false + allow_squash_merge = false + allow_rebase_merge = false + + delete_branch_on_merge = false +} diff --git a/terraform/github/outputs.tf b/terraform/github/outputs.tf new file mode 100644 index 0000000000..9d6e0b9628 --- /dev/null +++ b/terraform/github/outputs.tf @@ -0,0 +1,14 @@ +output "repository_url" { + description = "Repository HTML URL" + value = github_repository.course_repo.html_url +} + +output "repository_name" { + description = "Repository name" + value = github_repository.course_repo.name +} + +output "repository_full_name" { + description = "Repository full name (owner/repo)" + value = github_repository.course_repo.full_name +} diff --git a/terraform/github/variables.tf b/terraform/github/variables.tf new file mode 100644 index 0000000000..742aca9adf --- /dev/null +++ b/terraform/github/variables.tf @@ -0,0 +1,12 @@ +variable "github_token" { + description = "GitHub Personal Access Token with repo scope" + type = string + sensitive = true + default = "" +} + +variable "github_owner" { + description = "GitHub username or organization" + type = string + default = "" +} diff --git a/terraform/main.tf b/terraform/main.tf new file mode 100644 index 0000000000..c83b6e78a2 --- /dev/null +++ b/terraform/main.tf @@ -0,0 +1,117 @@ +terraform { + required_version = ">= 1.5.0" + + required_providers { + yandex = { + source = "yandex-cloud/yandex" + version = ">= 0.187.0" + } + } +} + +provider "yandex" { + service_account_key_file = var.yc_service_account_key_file + cloud_id = var.yc_cloud_id + folder_id = var.yc_folder_id + zone = var.yc_zone +} + +# --- Network --- + +resource "yandex_vpc_network" "lab04" { + name = "lab04-network" + + labels = { + project = "devops-lab04" + } +} + +resource "yandex_vpc_subnet" "lab04" { + name = "lab04-subnet" + zone = var.yc_zone + network_id = yandex_vpc_network.lab04.id + v4_cidr_blocks = ["10.0.1.0/24"] + 
+ labels = { + project = "devops-lab04" + } +} + +# --- Security Group --- + +resource "yandex_vpc_security_group" "lab04" { + name = "lab04-sg" + network_id = yandex_vpc_network.lab04.id + + ingress { + description = "SSH" + protocol = "TCP" + port = 22 + v4_cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + description = "HTTP" + protocol = "TCP" + port = 80 + v4_cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + description = "App port" + protocol = "TCP" + port = 5000 + v4_cidr_blocks = ["0.0.0.0/0"] + } + + egress { + description = "Allow all outbound" + protocol = "ANY" + v4_cidr_blocks = ["0.0.0.0/0"] + } + + labels = { + project = "devops-lab04" + } +} + +# --- Compute Instance --- + +data "yandex_compute_image" "ubuntu" { + family = "ubuntu-2404-lts" +} + +resource "yandex_compute_instance" "lab04" { + name = "lab04-vm" + platform_id = "standard-v2" + zone = var.yc_zone + + resources { + cores = 2 + memory = 1 + core_fraction = 20 + } + + boot_disk { + initialize_params { + image_id = data.yandex_compute_image.ubuntu.id + size = 10 + type = "network-hdd" + } + } + + network_interface { + subnet_id = yandex_vpc_subnet.lab04.id + nat = true + security_group_ids = [yandex_vpc_security_group.lab04.id] + } + + metadata = { + ssh-keys = "${var.vm_user}:${var.ssh_public_key}" + } + + labels = { + project = "devops-lab04" + env = "dev" + } +} diff --git a/terraform/outputs.tf b/terraform/outputs.tf new file mode 100644 index 0000000000..b8986747a9 --- /dev/null +++ b/terraform/outputs.tf @@ -0,0 +1,24 @@ +output "vm_public_ip" { + description = "Public IP address of the VM" + value = yandex_compute_instance.lab04.network_interface[0].nat_ip_address +} + +output "vm_name" { + description = "Name of the VM" + value = yandex_compute_instance.lab04.name +} + +output "ssh_command" { + description = "SSH command to connect to the VM" + value = "ssh ${var.vm_user}@${yandex_compute_instance.lab04.network_interface[0].nat_ip_address}" +} + +output "subnet_id" { + description = 
"Subnet ID" + value = yandex_vpc_subnet.lab04.id +} + +output "security_group_id" { + description = "Security group ID" + value = yandex_vpc_security_group.lab04.id +} diff --git a/terraform/terraform.tfvars.example b/terraform/terraform.tfvars.example new file mode 100644 index 0000000000..00b2a74eec --- /dev/null +++ b/terraform/terraform.tfvars.example @@ -0,0 +1,13 @@ +# Yandex Cloud credentials +yc_service_account_key_file = "/path/to/sa-key.json" +yc_cloud_id = "YOUR_CLOUD_ID" +yc_folder_id = "YOUR_FOLDER_ID" + +# Region +yc_zone = "ru-central1-a" + +# VM settings +vm_user = "ubuntu" + +# Paste your public SSH key content here (cat ~/.ssh/id_rsa.pub) +ssh_public_key = "ssh-rsa AAAA..." diff --git a/terraform/variables.tf b/terraform/variables.tf new file mode 100644 index 0000000000..a6b2a87dab --- /dev/null +++ b/terraform/variables.tf @@ -0,0 +1,35 @@ +variable "yc_service_account_key_file" { + description = "Path to Yandex Cloud service account key JSON file" + type = string + default = "" +} + +variable "yc_cloud_id" { + description = "Yandex Cloud ID" + type = string + default = "" +} + +variable "yc_folder_id" { + description = "Yandex Cloud Folder ID" + type = string + default = "" +} + +variable "yc_zone" { + description = "Yandex Cloud availability zone" + type = string + default = "ru-central1-a" +} + +variable "vm_user" { + description = "Username for the VM" + type = string + default = "ubuntu" +} + +variable "ssh_public_key" { + description = "SSH public key content for VM access" + type = string + default = "" +}