diff --git a/dev/deploy-test-env.sh b/dev/deploy-test-env.sh new file mode 100755 index 0000000..d49e594 --- /dev/null +++ b/dev/deploy-test-env.sh @@ -0,0 +1,134 @@ +#!/bin/bash +# deploy-test-env.sh — Deploy test-env to dev cluster and verify +# +# Usage: +# bash dev/deploy-test-env.sh [--check-only] [--create-secrets] +# +# Prerequisites: +# - kubectl configured for dev cluster +# - Images benadis/pg-1c:18.1-2.1C and benadis/ar-edt:6.2.27.1 accessible +# +# This script: +# 1. Validates kustomize build +# 2. Applies manifests via kustomize +# 3. Creates secrets if --create-secrets +# 4. Waits for pods to be ready +# 5. Runs smoke tests (pg_isready, ragent check) + +set -euo pipefail +cd "$(dirname "$0")/.." + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +CHECK_ONLY=false +CREATE_SECRETS=false + +for arg in "$@"; do + case $arg in + --check-only) CHECK_ONLY=true ;; + --create-secrets) CREATE_SECRETS=true ;; + esac +done + +echo "=== test-env deployment ===" + +# --- Step 1: Validate kustomize --- +echo -e "\n${YELLOW}[1/5] Validating kustomize build...${NC}" +if kubectl kustomize test-env/ > /dev/null 2>&1; then + echo -e "${GREEN} ✓ kustomize build OK${NC}" +else + echo -e "${RED} ✗ kustomize build FAILED${NC}" + kubectl kustomize test-env/ 2>&1 | head -20 + exit 1 +fi + +if $CHECK_ONLY; then + echo -e "\n${GREEN}Validation passed (--check-only)${NC}" + kubectl kustomize test-env/ | grep -c 'kind:' | xargs -I{} echo " {} resources" + exit 0 +fi + +# --- Step 2: Apply manifests --- +echo -e "\n${YELLOW}[2/5] Applying manifests...${NC}" +kubectl apply -k test-env/ +echo -e "${GREEN} ✓ Manifests applied${NC}" + +# --- Step 3: Create secrets if needed --- +if $CREATE_SECRETS; then + echo -e "\n${YELLOW}[3/5] Creating secrets...${NC}" + kubectl -n test-env create secret generic test-env-secrets \ + --from-literal=pg-password=usr1cv8 \ + --dry-run=client -o yaml | kubectl apply -f - + echo -e "${GREEN} ✓ Secrets created${NC}" +else + echo 
-e "\n${YELLOW}[3/5] Checking secrets...${NC}" + if kubectl -n test-env get secret test-env-secrets > /dev/null 2>&1; then + echo -e "${GREEN} ✓ test-env-secrets exists${NC}" + else + echo -e "${RED} ✗ test-env-secrets missing — run with --create-secrets${NC}" + fi +fi + +# --- Step 4: Wait for pods --- +echo -e "\n${YELLOW}[4/5] Waiting for pods (timeout 120s)...${NC}" + +wait_for_pod() { + local label=$1 + local timeout=${2:-120} + local start=$(date +%s) + while true; do + local phase=$(kubectl -n test-env get pods -l "$label" -o jsonpath='{.items[0].status.phase}' 2>/dev/null || echo "Pending") + if [ "$phase" = "Running" ]; then + echo -e "${GREEN} ✓ $label → Running${NC}" + return 0 + fi + local elapsed=$(( $(date +%s) - start )) + if [ $elapsed -gt $timeout ]; then + echo -e "${RED} ✗ $label → $phase (timeout ${timeout}s)${NC}" + return 1 + fi + sleep 5 + done +} + +wait_for_pod "app=test-pg" 120 +wait_for_pod "app=onec-server" 120 + +# --- Step 5: Smoke tests --- +echo -e "\n${YELLOW}[5/5] Smoke tests...${NC}" + +# PostgreSQL ready +PG_POD=$(kubectl -n test-env get pod -l app=test-pg -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "") +if [ -n "$PG_POD" ]; then + if kubectl -n test-env exec "$PG_POD" -- su - postgres -c "/usr/lib/postgresql/18/bin/pg_isready" > /dev/null 2>&1; then + echo -e "${GREEN} ✓ PostgreSQL is ready${NC}" + else + echo -e "${RED} ✗ PostgreSQL pg_isready failed${NC}" + fi +fi + +# 1C server ragent running +ONEC_POD=$(kubectl -n test-env get pod -l app=onec-server -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "") +if [ -n "$ONEC_POD" ]; then + if kubectl -n test-env exec "$ONEC_POD" -- pgrep ragent > /dev/null 2>&1; then + echo -e "${GREEN} ✓ ragent is running${NC}" + else + echo -e "${RED} ✗ ragent not running${NC}" + fi + if kubectl -n test-env exec "$ONEC_POD" -- pgrep crserver > /dev/null 2>&1; then + echo -e "${GREEN} ✓ crserver is running${NC}" + else + echo -e "${RED} ✗ crserver not running${NC}" + fi 
+fi + +# Summary +echo -e "\n=== Status ===" +kubectl -n test-env get pods -o wide +echo "" +kubectl -n test-env get svc +echo "" +kubectl -n test-env get pvc diff --git a/test-env/.gitkeep b/test-env/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/test-env/config.yaml b/test-env/config.yaml new file mode 100644 index 0000000..34ff202 --- /dev/null +++ b/test-env/config.yaml @@ -0,0 +1,10 @@ +{ + "name": "test-env", + "namespace": "test-env", + "step": "6", + "source": { + "repoURL": "https://github.com/Kargones/deploy-app-kargo-private.git", + "path": "test-env", + "targetRevision": "main" + } +} diff --git a/test-env/gitea-runner/configmap.yaml b/test-env/gitea-runner/configmap.yaml new file mode 100644 index 0000000..23de4ab --- /dev/null +++ b/test-env/gitea-runner/configmap.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-env-runner-config + namespace: test-env +data: + config.yaml: | + log: + level: info + runner: + file: .runner + capacity: 1 + timeout: 3h + labels: + - "edt:docker://benadis/ar-edt-slim:latest" + - "ubuntu-latest:docker://node:20-bullseye" + cache: + enabled: true + dir: "" + container: + network: "" + privileged: false + options: + workdir_parent: diff --git a/test-env/gitea-runner/deployment.yaml b/test-env/gitea-runner/deployment.yaml new file mode 100644 index 0000000..607796b --- /dev/null +++ b/test-env/gitea-runner/deployment.yaml @@ -0,0 +1,98 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-env-runner + namespace: test-env + labels: + app: test-env-runner +spec: + replicas: 0 # Scaled by register-job after token is obtained. + # NOTE: requires Docker-in-Docker (DinD) sidecar to run workflows. + # See TODO below for DinD configuration. 
+ selector: + matchLabels: + app: test-env-runner + template: + metadata: + labels: + app: test-env-runner + spec: + containers: + # Docker-in-Docker sidecar (required for act_runner to execute workflows) + - name: dind + image: docker:27-dind + securityContext: + privileged: true + env: + - name: DOCKER_TLS_CERTDIR + value: "" + volumeMounts: + - name: docker-socket + mountPath: /var/run + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: "2" + memory: 2Gi + - name: runner + image: gitea/act_runner:0.2.11 + env: + - name: DOCKER_HOST + value: "unix:///var/run/docker.sock" + - name: GITEA_INSTANCE_URL + value: "http://gitea-http.gitea.svc.cluster.local:3000" + - name: GITEA_RUNNER_REGISTRATION_TOKEN + valueFrom: + secretKeyRef: + name: test-env-runner-token + key: token + optional: true + # 1C server connection variables (for workflows) + - name: SRV1C_HOST + value: "onec-server.test-env.svc.cluster.local" + - name: SRV1C_PORT + value: "1540" + - name: RAC_HOST + value: "onec-server.test-env.svc.cluster.local" + - name: RAC_PORT + value: "1545" + - name: STORAGE_HOST + value: "onec-server.test-env.svc.cluster.local" + - name: STORAGE_PORT + value: "1542" + - name: PG_HOST + value: "postgres.test-env.svc.cluster.local" + - name: PG_PORT + value: "5432" + - name: PG_USER + value: "usr1cv8" + - name: PG_PASSWORD + valueFrom: + secretKeyRef: + name: test-env-secrets + key: pg-password + volumeMounts: + - name: docker-socket + mountPath: /var/run + - name: config + mountPath: /config + readOnly: true + - name: data + mountPath: /data + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: "2" + memory: 2Gi + volumes: + - name: docker-socket + emptyDir: {} + - name: config + configMap: + name: test-env-runner-config + - name: data + emptyDir: {} diff --git a/test-env/gitea-runner/register-job.yaml b/test-env/gitea-runner/register-job.yaml new file mode 100644 index 0000000..069ba85 --- /dev/null +++ 
b/test-env/gitea-runner/register-job.yaml @@ -0,0 +1,134 @@ +# Job: obtains Gitea runner registration token via API and creates +# the test-env-runner-token Secret. Run once after Gitea is available. +# +# Prerequisites: gitea-admin Secret in gitea namespace (created by deploy-k3s) +# The job resolves Gitea pod IP (headless svc) and calls the registration API. +apiVersion: batch/v1 +kind: Job +metadata: + name: register-test-env-runner + namespace: test-env + labels: + app: test-env-runner +spec: + backoffLimit: 3 + ttlSecondsAfterFinished: 300 + template: + spec: + serviceAccountName: runner-registrar + restartPolicy: OnFailure + containers: + - name: register + image: alpine/k8s:1.35.1 + command: + - sh + - -c + - | + set -e + + echo "=== Obtaining Gitea runner registration token ===" + + # Get Gitea admin credentials from gitea namespace + USER=$(kubectl -n gitea get secret gitea-admin -o jsonpath='{.data.username}' | base64 -d) + PASS=$(kubectl -n gitea get secret gitea-admin -o jsonpath='{.data.password}' | base64 -d) + + # Resolve Gitea pod IP (headless service) + GITEA_POD_IP=$(kubectl -n gitea get pod -l app.kubernetes.io/name=gitea \ + -o jsonpath='{.items[0].status.podIP}') + GITEA_URL="http://${GITEA_POD_IP}:3000" + + echo "Gitea URL: $GITEA_URL" + + # Wait for Gitea API to be ready + for i in $(seq 1 30); do + if curl -sf "$GITEA_URL/api/v1/version" > /dev/null 2>&1; then + echo "Gitea API is ready" + break + fi + echo "Waiting for Gitea API... ($i/30)" + sleep 5 + done + + # Get registration token + TOKEN=$(curl -sf -X POST -u "$USER:$PASS" \ + "$GITEA_URL/api/v1/user/actions/runners/registration-token" \ + | sed 's/.*"token":"\([^"]*\)".*/\1/') + + if [ -z "$TOKEN" ]; then + echo "ERROR: Failed to get registration token" + exit 1 + fi + + echo "Got registration token: ${TOKEN:0:8}..." 
+ + # Create/update Secret in test-env namespace + kubectl -n test-env create secret generic test-env-runner-token \ + --from-literal=token="$TOKEN" \ + --dry-run=client -o yaml | kubectl apply -f - + + echo "=== Secret test-env-runner-token created ===" + + # Scale runner deployment to 1 + kubectl -n test-env scale deployment test-env-runner --replicas=1 + echo "=== Runner deployment scaled to 1 ===" +--- +# ServiceAccount + RBAC for the registration job +apiVersion: v1 +kind: ServiceAccount +metadata: + name: runner-registrar + namespace: test-env +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: runner-registrar + namespace: test-env +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["create", "get", "update", "patch"] + - apiGroups: ["apps"] + resources: ["deployments/scale", "deployments"] + verbs: ["get", "update", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: runner-registrar + namespace: test-env +subjects: + - kind: ServiceAccount + name: runner-registrar + namespace: test-env +roleRef: + kind: Role + name: runner-registrar + apiGroup: rbac.authorization.k8s.io +--- +# ClusterRole to read gitea-admin secret from gitea namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: test-env-gitea-reader +rules: + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["gitea-admin"] + verbs: ["get"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: test-env-gitea-reader +subjects: + - kind: ServiceAccount + name: runner-registrar + namespace: test-env +roleRef: + kind: ClusterRole + name: test-env-gitea-reader + apiGroup: rbac.authorization.k8s.io diff --git a/test-env/kustomization.yaml b/test-env/kustomization.yaml new file mode 100644 index 0000000..749b3eb --- /dev/null +++ b/test-env/kustomization.yaml @@ -0,0 +1,17 @@ 
+apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - namespace.yaml + # PostgreSQL 18.x-2.1C (image has built-in 1C entrypoint) + - postgres/statefulset.yaml + - postgres/service.yaml + # 1C:Enterprise server (ragent + crserver + ras) + - onec-server/statefulset.yaml + - onec-server/service.yaml + - onec-server/service-nodeport.yaml + - onec-server/configmap.yaml + # Gitea Actions runner (for apk-ci-ng workflows) + - gitea-runner/deployment.yaml + - gitea-runner/configmap.yaml + - gitea-runner/register-job.yaml diff --git a/test-env/namespace.yaml b/test-env/namespace.yaml new file mode 100644 index 0000000..46c2818 --- /dev/null +++ b/test-env/namespace.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: test-env + labels: + name: test-env + environment: dev diff --git a/test-env/onec-server/configmap.yaml b/test-env/onec-server/configmap.yaml new file mode 100644 index 0000000..df10628 --- /dev/null +++ b/test-env/onec-server/configmap.yaml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: onec-config + namespace: test-env +data: + # HASP license server configuration + # Points to external license server for 1C client mode + nethasp.ini: | + [NH_COMMON] + NH_TCPIP = Enabled + + [NH_TCPIP] + NH_SERVER_ADDR = 89.110.88.209 + NH_PORT_NUMBER = 475 + + # 1C server entrypoint: starts ragent, crserver, ras, sshd + # Based on docker-compose env service from tester.benadis.org + entrypoint.sh: | + #!/bin/bash + set -e + + ONEC_BASE="/opt/1cv8/x86_64" + # Auto-detect 1C version directory + ONEC_VER=$(ls -1 "$ONEC_BASE" | sort -V | tail -1) + ONEC_BIN="$ONEC_BASE/$ONEC_VER" + + echo "=== Starting 1C:Enterprise $ONEC_VER ===" + + mkdir -p /data/srv1c /data/storage + + # Start ragent (cluster manager) — port 1540 + $ONEC_BIN/ragent -port 1540 -regport 1541 -range 1560:1591 -d /data/srv1c & + + # Start crserver (configuration repository server) — port 1542 + $ONEC_BIN/crserver -port 1542 -d /data/storage & 
+ + # Wait for ragent to start, then launch RAS + sleep 3 + $ONEC_BIN/ras cluster --port 1545 & + + # Start SSH daemon if available + if [ -x /usr/sbin/sshd ]; then + /usr/sbin/sshd 2>/dev/null || true + fi + + echo "Test environment ready (ragent:1540, crserver:1542, ras:1545)" + exec tail -f /dev/null diff --git a/test-env/onec-server/service-nodeport.yaml b/test-env/onec-server/service-nodeport.yaml new file mode 100644 index 0000000..975dc75 --- /dev/null +++ b/test-env/onec-server/service-nodeport.yaml @@ -0,0 +1,34 @@ +# NodePort service for external access to 1C server +# Accessed via SSH tunnels from connect-multi.ps1 +apiVersion: v1 +kind: Service +metadata: + name: onec-nodeport + namespace: test-env + labels: + app: onec-server +spec: + type: NodePort + selector: + app: onec-server + ports: + - name: ragent + port: 1540 + targetPort: 1540 + nodePort: 31540 + protocol: TCP + - name: regport + port: 1541 + targetPort: 1541 + nodePort: 31541 + protocol: TCP + - name: crserver + port: 1542 + targetPort: 1542 + nodePort: 31542 + protocol: TCP + - name: ras + port: 1545 + targetPort: 1545 + nodePort: 31545 + protocol: TCP diff --git a/test-env/onec-server/service.yaml b/test-env/onec-server/service.yaml new file mode 100644 index 0000000..51bf275 --- /dev/null +++ b/test-env/onec-server/service.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: onec-server + namespace: test-env + labels: + app: onec-server +spec: + type: ClusterIP + selector: + app: onec-server + ports: + - name: ragent + port: 1540 + targetPort: 1540 + protocol: TCP + - name: regport + port: 1541 + targetPort: 1541 + protocol: TCP + - name: crserver + port: 1542 + targetPort: 1542 + protocol: TCP + - name: ras + port: 1545 + targetPort: 1545 + protocol: TCP diff --git a/test-env/onec-server/statefulset.yaml b/test-env/onec-server/statefulset.yaml new file mode 100644 index 0000000..2877096 --- /dev/null +++ b/test-env/onec-server/statefulset.yaml @@ -0,0 +1,107 @@ 
+apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: onec-server + namespace: test-env + labels: + app: onec-server +spec: + serviceName: onec-server + replicas: 1 + selector: + matchLabels: + app: onec-server + template: + metadata: + labels: + app: onec-server + spec: + # Stable hostname for 1C community license (tied to hostname, not hardware) + hostname: test-env-0 + containers: + - name: onec + image: benadis/ar-edt:6.2.27.1 + command: ["/scripts/entrypoint.sh"] + env: + - name: LANG + value: "ru_RU.UTF-8" + - name: LC_ALL + value: "ru_RU.UTF-8" + - name: TZ + value: "Europe/Moscow" + - name: PGHOST + value: "postgres.test-env.svc.cluster.local" + - name: PGPORT + value: "5432" + - name: PGUSER + value: "usr1cv8" + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: test-env-secrets + key: pg-password + ports: + - name: ragent + containerPort: 1540 + protocol: TCP + - name: regport + containerPort: 1541 + protocol: TCP + - name: crserver + containerPort: 1542 + protocol: TCP + - name: ras + containerPort: 1545 + protocol: TCP + volumeMounts: + - name: onec-data + mountPath: /data + - name: onec-scripts + mountPath: /scripts + readOnly: true + - name: onec-nethasp + mountPath: /opt/1cv8/conf/nethasp.ini + subPath: nethasp.ini + readOnly: true + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: "4" + memory: 4Gi + readinessProbe: + exec: + command: ["sh", "-c", "pgrep ragent && pgrep crserver"] + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 5 + livenessProbe: + exec: + command: ["sh", "-c", "pgrep ragent"] + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 5 + volumes: + - name: onec-scripts + configMap: + name: onec-config + items: + - key: entrypoint.sh + path: entrypoint.sh + mode: 0755 + - name: onec-nethasp + configMap: + name: onec-config + items: + - key: nethasp.ini + path: nethasp.ini + volumeClaimTemplates: + - metadata: + name: onec-data + spec: + storageClassName: local-path + accessModes: 
["ReadWriteOnce"] + resources: + requests: + storage: 10Gi diff --git a/test-env/postgres/configmap.yaml b/test-env/postgres/configmap.yaml new file mode 100644 index 0000000..e3e318a --- /dev/null +++ b/test-env/postgres/configmap.yaml @@ -0,0 +1,10 @@ +# PostgreSQL ConfigMap +# The benadis/pg-1c image has a built-in entrypoint that: +# 1. Configures postgresql.conf with 1C optimizations on first run +# 2. Sets pg_hba.conf for network access +# 3. Creates usr1cv8 superuser +# 4. Starts PostgreSQL +# +# No additional configuration needed — all settings are baked into the image. +# This file is kept as documentation placeholder. +# If custom settings are needed in the future, mount them via ConfigMap. diff --git a/test-env/postgres/service.yaml b/test-env/postgres/service.yaml new file mode 100644 index 0000000..a47f127 --- /dev/null +++ b/test-env/postgres/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: test-env + labels: + app: test-pg +spec: + type: ClusterIP + selector: + app: test-pg + ports: + - name: postgres + port: 5432 + targetPort: 5432 + protocol: TCP diff --git a/test-env/postgres/statefulset.yaml b/test-env/postgres/statefulset.yaml new file mode 100644 index 0000000..76a5f5f --- /dev/null +++ b/test-env/postgres/statefulset.yaml @@ -0,0 +1,93 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-pg + namespace: test-env + labels: + app: test-pg +spec: + serviceName: postgres + replicas: 1 + selector: + matchLabels: + app: test-pg + template: + metadata: + labels: + app: test-pg + spec: + initContainers: + # On first run the PVC is empty — copy the pre-built PG cluster + # from the image so the main entrypoint can configure and start it. + - name: init-pgdata + image: benadis/pg-1c:18.1-2.1C + command: + - sh + - -c + - | + if [ ! -d /data/18/main ]; then + echo "Initializing PG data from image..." + cp -a /var/lib/postgresql/. /data/ + echo "Done." 
+ else + echo "PG data already exists, skipping init." + fi + volumeMounts: + - name: pg-data + mountPath: /data + containers: + - name: postgres + image: benadis/pg-1c:18.1-2.1C + # Use the image's built-in entrypoint (configures 1C on first run) + env: + - name: LANG + value: "ru_RU.UTF-8" + - name: LC_ALL + value: "ru_RU.UTF-8" + - name: TZ + value: "Europe/Moscow" + ports: + - name: postgres + containerPort: 5432 + protocol: TCP + volumeMounts: + - name: pg-data + mountPath: /var/lib/postgresql + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: "2" + memory: 4Gi + readinessProbe: + exec: + command: + - su + - "-" + - postgres + - "-c" + - "/usr/lib/postgresql/18/bin/pg_isready" + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + livenessProbe: + exec: + command: + - su + - "-" + - postgres + - "-c" + - "/usr/lib/postgresql/18/bin/pg_isready" + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 5 + volumeClaimTemplates: + - metadata: + name: pg-data + spec: + storageClassName: local-path + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 20Gi diff --git a/test-env/secrets/placeholder.yaml b/test-env/secrets/placeholder.yaml new file mode 100644 index 0000000..7018434 --- /dev/null +++ b/test-env/secrets/placeholder.yaml @@ -0,0 +1,22 @@ +# Placeholder for SOPS-encrypted secrets +# Actual secrets will be encrypted with: sops --encrypt --age , +# +# Required secrets (create as test-env-secrets): +# pg-password: password for PostgreSQL usr1cv8 user +# +# Required secrets (create as test-env-runner-token): +# token: Gitea Actions runner registration token +# +# Example (before encryption): +# apiVersion: v1 +# kind: Secret +# metadata: +# name: test-env-secrets +# namespace: test-env +# type: Opaque +# stringData: +# pg-password: "usr1cv8" +# +# For now, create secrets manually in the cluster: +# kubectl -n test-env create secret generic test-env-secrets --from-literal=pg-password=usr1cv8 +# kubectl -n 
test-env create secret generic test-env-runner-token --from-literal=token=<REGISTRATION_TOKEN>