feat(test-env): add 1C test environment (#11)

* feat(test-env): add 1C test environment manifests (#11)

- PostgreSQL 18.x-2.1C StatefulSet with ru_RU.UTF-8 locale init
- 1C server (ragent+crserver+ras) StatefulSet with stable hostname
- Gitea runner Deployment with edt label for apk-ci-ng
- NodePort services for external 1C access (31540-31545)
- Deploy/verify script: dev/deploy-test-env.sh
- config.yaml for ApplicationSet integration
- test-env only in dev cluster (not in prod AppSet)

* fix(test-env): use initContainer for PG data + remove custom entrypoint

PVC mount on /var/lib/postgresql wipes the image's pre-built cluster.
Solution: initContainer copies cluster data from image to PVC on first run.
Removed custom pg-entrypoint.sh ConfigMap — image has its own.

* feat(test-env): DinD sidecar for runner + auto-registration Job

- Add Docker-in-Docker sidecar to gitea-runner Deployment
- Add register-job.yaml: Job that obtains Gitea runner token via API,
  creates Secret, and scales runner to 1
- RBAC: ServiceAccount + Role/ClusterRole for cross-namespace secret access
- Runner labels: edt (for apk-ci-ng), ubuntu-latest

---------

Co-authored-by: XoR <xor@benadis.ru>
This commit is contained in:
Dear XoR
2026-03-12 12:33:50 +03:00
committed by GitHub
parent 01623cb260
commit efb2427586
16 changed files with 782 additions and 0 deletions

View File

10
test-env/config.yaml Normal file
View File

@@ -0,0 +1,10 @@
{
  "name": "test-env",
  "namespace": "test-env",
  "step": "6",
  "source": {
    "repoURL": "https://github.com/Kargones/deploy-app-kargo-private.git",
    "path": "test-env",
    "targetRevision": "main"
  }
}

View File

@@ -0,0 +1,24 @@
---
# act_runner configuration, mounted at /config in the test-env-runner Deployment.
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-env-runner-config
  namespace: test-env
data:
  config.yaml: |
    log:
      level: info
    runner:
      file: .runner
      capacity: 1
      timeout: 3h
      labels:
        - "edt:docker://benadis/ar-edt-slim:latest"
        - "ubuntu-latest:docker://node:20-bullseye"
    cache:
      enabled: true
      dir: ""
    container:
      network: ""
      privileged: false
      options:
      workdir_parent:

View File

@@ -0,0 +1,98 @@
---
# Gitea Actions runner with a Docker-in-Docker sidecar.
# Starts at 0 replicas; the register-job scales it to 1 after creating
# the test-env-runner-token Secret.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-env-runner
  namespace: test-env
  labels:
    app: test-env-runner
spec:
  replicas: 0  # Scaled by register-job after token is obtained.
  selector:
    matchLabels:
      app: test-env-runner
  template:
    metadata:
      labels:
        app: test-env-runner
    spec:
      containers:
        # Docker-in-Docker sidecar (required for act_runner to execute workflows).
        - name: dind
          image: docker:27-dind
          securityContext:
            privileged: true  # dockerd cannot run unprivileged in this setup
          env:
            - name: DOCKER_TLS_CERTDIR
              value: ""  # disable TLS; socket is shared only via the pod-local emptyDir
          volumeMounts:
            - name: docker-socket
              mountPath: /var/run
          resources:
            requests:
              cpu: 100m
              memory: 256Mi
            limits:
              cpu: "2"
              memory: 2Gi
        - name: runner
          image: gitea/act_runner:0.2.11
          env:
            # Point act_runner at the mounted ConfigMap; without this the /config
            # volume below is never read.
            # NOTE(review): the act_runner image entrypoint reads CONFIG_FILE —
            # confirm this holds for tag 0.2.11.
            - name: CONFIG_FILE
              value: "/config/config.yaml"
            - name: DOCKER_HOST
              value: "unix:///var/run/docker.sock"
            - name: GITEA_INSTANCE_URL
              value: "http://gitea-http.gitea.svc.cluster.local:3000"
            - name: GITEA_RUNNER_REGISTRATION_TOKEN
              valueFrom:
                secretKeyRef:
                  name: test-env-runner-token
                  key: token
                  optional: true  # Secret is created later by register-job
            # 1C server connection variables (for workflows)
            - name: SRV1C_HOST
              value: "onec-server.test-env.svc.cluster.local"
            - name: SRV1C_PORT
              value: "1540"
            - name: RAC_HOST
              value: "onec-server.test-env.svc.cluster.local"
            - name: RAC_PORT
              value: "1545"
            - name: STORAGE_HOST
              value: "onec-server.test-env.svc.cluster.local"
            - name: STORAGE_PORT
              value: "1542"
            - name: PG_HOST
              value: "postgres.test-env.svc.cluster.local"
            - name: PG_PORT
              value: "5432"
            - name: PG_USER
              value: "usr1cv8"
            - name: PG_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: test-env-secrets
                  key: pg-password
          volumeMounts:
            - name: docker-socket
              mountPath: /var/run
            - name: config
              mountPath: /config
              readOnly: true
            - name: data
              mountPath: /data
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: "2"
              memory: 2Gi
      volumes:
        - name: docker-socket
          emptyDir: {}
        - name: config
          configMap:
            name: test-env-runner-config
        - name: data
          emptyDir: {}

View File

@@ -0,0 +1,134 @@
---
# Job: obtains a Gitea runner registration token via API and creates
# the test-env-runner-token Secret. Run once after Gitea is available.
#
# Prerequisites: gitea-admin Secret in the gitea namespace (created by deploy-k3s).
# The job resolves the Gitea pod IP (headless svc) and calls the registration API.
apiVersion: batch/v1
kind: Job
metadata:
  name: register-test-env-runner
  namespace: test-env
  labels:
    app: test-env-runner
spec:
  backoffLimit: 3
  ttlSecondsAfterFinished: 300  # auto-clean finished job after 5 minutes
  template:
    spec:
      serviceAccountName: runner-registrar
      restartPolicy: OnFailure
      containers:
        - name: register
          image: alpine/k8s:1.35.1  # provides kubectl + curl
          command:
            - sh
            - -c
            - |
              set -e
              echo "=== Obtaining Gitea runner registration token ==="
              # Get Gitea admin credentials from gitea namespace
              USER=$(kubectl -n gitea get secret gitea-admin -o jsonpath='{.data.username}' | base64 -d)
              PASS=$(kubectl -n gitea get secret gitea-admin -o jsonpath='{.data.password}' | base64 -d)
              # Resolve Gitea pod IP (headless service)
              GITEA_POD_IP=$(kubectl -n gitea get pod -l app.kubernetes.io/name=gitea \
                -o jsonpath='{.items[0].status.podIP}')
              GITEA_URL="http://${GITEA_POD_IP}:3000"
              echo "Gitea URL: $GITEA_URL"
              # Wait for Gitea API to be ready (up to 30 * 5s)
              for i in $(seq 1 30); do
                if curl -sf "$GITEA_URL/api/v1/version" > /dev/null 2>&1; then
                  echo "Gitea API is ready"
                  break
                fi
                echo "Waiting for Gitea API... ($i/30)"
                sleep 5
              done
              # Get registration token; a failed curl is caught by the -z check below
              TOKEN=$(curl -sf -X POST -u "$USER:$PASS" \
                "$GITEA_URL/api/v1/user/actions/runners/registration-token" \
                | sed 's/.*"token":"\([^"]*\)".*/\1/')
              if [ -z "$TOKEN" ]; then
                echo "ERROR: Failed to get registration token"
                exit 1
              fi
              echo "Got registration token: ${TOKEN:0:8}..."
              # Create/update Secret in test-env namespace (idempotent apply)
              kubectl -n test-env create secret generic test-env-runner-token \
                --from-literal=token="$TOKEN" \
                --dry-run=client -o yaml | kubectl apply -f -
              echo "=== Secret test-env-runner-token created ==="
              # Scale runner deployment to 1
              kubectl -n test-env scale deployment test-env-runner --replicas=1
              echo "=== Runner deployment scaled to 1 ==="
---
# ServiceAccount + RBAC for the registration job
apiVersion: v1
kind: ServiceAccount
metadata:
  name: runner-registrar
  namespace: test-env
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: runner-registrar
  namespace: test-env
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "update", "patch"]
  - apiGroups: ["apps"]
    resources: ["deployments/scale", "deployments"]
    verbs: ["get", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: runner-registrar
  namespace: test-env
subjects:
  - kind: ServiceAccount
    name: runner-registrar
    namespace: test-env
roleRef:
  kind: Role
  name: runner-registrar
  apiGroup: rbac.authorization.k8s.io
---
# ClusterRole to read the gitea-admin secret and pods from the gitea namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: test-env-gitea-reader
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["gitea-admin"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: test-env-gitea-reader
subjects:
  - kind: ServiceAccount
    name: runner-registrar
    namespace: test-env
roleRef:
  kind: ClusterRole
  name: test-env-gitea-reader
  apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,17 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  # PostgreSQL 18.x-2.1C (image has built-in 1C entrypoint)
  - postgres/statefulset.yaml
  - postgres/service.yaml
  # 1C:Enterprise server (ragent + crserver + ras)
  - onec-server/statefulset.yaml
  - onec-server/service.yaml
  - onec-server/service-nodeport.yaml
  - onec-server/configmap.yaml
  # Gitea Actions runner (for apk-ci-ng workflows)
  - gitea-runner/deployment.yaml
  - gitea-runner/configmap.yaml
  - gitea-runner/register-job.yaml

7
test-env/namespace.yaml Normal file
View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: test-env
  labels:
    name: test-env
    environment: dev

View File

@@ -0,0 +1,48 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: onec-config
  namespace: test-env
data:
  # HASP license server configuration.
  # Points to an external license server for 1C client mode.
  nethasp.ini: |
    [NH_COMMON]
    NH_TCPIP = Enabled
    [NH_TCPIP]
    NH_SERVER_ADDR = 89.110.88.209
    NH_PORT_NUMBER = 475
  # 1C server entrypoint: starts ragent, crserver, ras, sshd.
  # Based on docker-compose env service from tester.benadis.org.
  entrypoint.sh: |
    #!/bin/bash
    set -e
    ONEC_BASE="/opt/1cv8/x86_64"
    # Auto-detect 1C version directory (latest by version sort)
    ONEC_VER=$(ls -1 "$ONEC_BASE" | sort -V | tail -1)
    ONEC_BIN="$ONEC_BASE/$ONEC_VER"
    echo "=== Starting 1C:Enterprise $ONEC_VER ==="
    mkdir -p /data/srv1c /data/storage
    # Start ragent (cluster manager) — port 1540
    $ONEC_BIN/ragent -port 1540 -regport 1541 -range 1560:1591 -d /data/srv1c &
    # Start crserver (configuration repository server) — port 1542
    $ONEC_BIN/crserver -port 1542 -d /data/storage &
    # Wait for ragent to start, then launch RAS
    sleep 3
    $ONEC_BIN/ras cluster --port 1545 &
    # Start SSH daemon if available (best-effort)
    if [ -x /usr/sbin/sshd ]; then
      /usr/sbin/sshd 2>/dev/null || true
    fi
    echo "Test environment ready (ragent:1540, crserver:1542, ras:1545)"
    # Keep the container alive; services above run in the background
    exec tail -f /dev/null

View File

@@ -0,0 +1,34 @@
---
# NodePort service for external access to the 1C server.
# Accessed via SSH tunnels from connect-multi.ps1.
apiVersion: v1
kind: Service
metadata:
  name: onec-nodeport
  namespace: test-env
  labels:
    app: onec-server
spec:
  type: NodePort
  selector:
    app: onec-server
  ports:
    - name: ragent
      port: 1540
      targetPort: 1540
      nodePort: 31540
      protocol: TCP
    - name: regport
      port: 1541
      targetPort: 1541
      nodePort: 31541
      protocol: TCP
    - name: crserver
      port: 1542
      targetPort: 1542
      nodePort: 31542
      protocol: TCP
    - name: ras
      port: 1545
      targetPort: 1545
      nodePort: 31545
      protocol: TCP

View File

@@ -0,0 +1,28 @@
---
# In-cluster service for the 1C server (ragent/crserver/ras).
apiVersion: v1
kind: Service
metadata:
  name: onec-server
  namespace: test-env
  labels:
    app: onec-server
spec:
  type: ClusterIP
  selector:
    app: onec-server
  ports:
    - name: ragent
      port: 1540
      targetPort: 1540
      protocol: TCP
    - name: regport
      port: 1541
      targetPort: 1541
      protocol: TCP
    - name: crserver
      port: 1542
      targetPort: 1542
      protocol: TCP
    - name: ras
      port: 1545
      targetPort: 1545
      protocol: TCP

View File

@@ -0,0 +1,107 @@
---
# 1C:Enterprise server. Runs the entrypoint script from the onec-config
# ConfigMap; data persists on a per-replica PVC.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: onec-server
  namespace: test-env
  labels:
    app: onec-server
spec:
  serviceName: onec-server
  replicas: 1
  selector:
    matchLabels:
      app: onec-server
  template:
    metadata:
      labels:
        app: onec-server
    spec:
      # Stable hostname for 1C community license (tied to hostname, not hardware)
      hostname: test-env-0
      containers:
        - name: onec
          image: benadis/ar-edt:6.2.27.1
          command: ["/scripts/entrypoint.sh"]
          env:
            - name: LANG
              value: "ru_RU.UTF-8"
            - name: LC_ALL
              value: "ru_RU.UTF-8"
            - name: TZ
              value: "Europe/Moscow"
            - name: PGHOST
              value: "postgres.test-env.svc.cluster.local"
            - name: PGPORT
              value: "5432"
            - name: PGUSER
              value: "usr1cv8"
            - name: PGPASSWORD
              valueFrom:
                secretKeyRef:
                  name: test-env-secrets
                  key: pg-password
          ports:
            - name: ragent
              containerPort: 1540
              protocol: TCP
            - name: regport
              containerPort: 1541
              protocol: TCP
            - name: crserver
              containerPort: 1542
              protocol: TCP
            - name: ras
              containerPort: 1545
              protocol: TCP
          volumeMounts:
            - name: onec-data
              mountPath: /data
            - name: onec-scripts
              mountPath: /scripts
              readOnly: true
            - name: onec-nethasp
              mountPath: /opt/1cv8/conf/nethasp.ini
              subPath: nethasp.ini
              readOnly: true
          resources:
            requests:
              cpu: 200m
              memory: 512Mi
            limits:
              cpu: "4"
              memory: 4Gi
          # Ready only when both ragent and crserver processes are up
          readinessProbe:
            exec:
              command: ["sh", "-c", "pgrep ragent && pgrep crserver"]
            initialDelaySeconds: 15
            periodSeconds: 10
            timeoutSeconds: 5
          livenessProbe:
            exec:
              command: ["sh", "-c", "pgrep ragent"]
            initialDelaySeconds: 30
            periodSeconds: 30
            timeoutSeconds: 5
      volumes:
        - name: onec-scripts
          configMap:
            name: onec-config
            items:
              - key: entrypoint.sh
                path: entrypoint.sh
                mode: 0755  # executable; octal is accepted by kubectl's YAML parser
        - name: onec-nethasp
          configMap:
            name: onec-config
            items:
              - key: nethasp.ini
                path: nethasp.ini
  volumeClaimTemplates:
    - metadata:
        name: onec-data
      spec:
        storageClassName: local-path
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 10Gi

View File

@@ -0,0 +1,10 @@
# PostgreSQL ConfigMap
# The benadis/pg-1c image has a built-in entrypoint that:
# 1. Configures postgresql.conf with 1C optimizations on first run
# 2. Sets pg_hba.conf for network access
# 3. Creates usr1cv8 superuser
# 4. Starts PostgreSQL
#
# No additional configuration needed — all settings are baked into the image.
# This file is kept as documentation placeholder.
# If custom settings are needed in the future, mount them via ConfigMap.

View File

@@ -0,0 +1,16 @@
---
# In-cluster service for the 1C-patched PostgreSQL StatefulSet.
apiVersion: v1
kind: Service
metadata:
  name: postgres
  namespace: test-env
  labels:
    app: test-pg
spec:
  type: ClusterIP
  selector:
    app: test-pg
  ports:
    - name: postgres
      port: 5432
      targetPort: 5432
      protocol: TCP

View File

@@ -0,0 +1,93 @@
---
# PostgreSQL (1C build). The PVC mount on /var/lib/postgresql would hide the
# image's pre-built cluster, so an initContainer seeds the PVC on first run.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: test-pg
  namespace: test-env
  labels:
    app: test-pg
spec:
  serviceName: postgres
  replicas: 1
  selector:
    matchLabels:
      app: test-pg
  template:
    metadata:
      labels:
        app: test-pg
    spec:
      initContainers:
        # On first run the PVC is empty — copy the pre-built PG cluster
        # from the image so the main entrypoint can configure and start it.
        - name: init-pgdata
          image: benadis/pg-1c:18.1-2.1C
          command:
            - sh
            - -c
            - |
              if [ ! -d /data/18/main ]; then
                echo "Initializing PG data from image..."
                cp -a /var/lib/postgresql/. /data/
                echo "Done."
              else
                echo "PG data already exists, skipping init."
              fi
          volumeMounts:
            - name: pg-data
              mountPath: /data
      containers:
        - name: postgres
          image: benadis/pg-1c:18.1-2.1C
          # Use the image's built-in entrypoint (configures 1C on first run)
          env:
            - name: LANG
              value: "ru_RU.UTF-8"
            - name: LC_ALL
              value: "ru_RU.UTF-8"
            - name: TZ
              value: "Europe/Moscow"
          ports:
            - name: postgres
              containerPort: 5432
              protocol: TCP
          volumeMounts:
            - name: pg-data
              mountPath: /var/lib/postgresql
          resources:
            requests:
              cpu: 200m
              memory: 512Mi
            limits:
              cpu: "2"
              memory: 4Gi
          readinessProbe:
            exec:
              command:
                - su
                - "-"
                - postgres
                - "-c"
                - "/usr/lib/postgresql/18/bin/pg_isready"
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
          livenessProbe:
            exec:
              command:
                - su
                - "-"
                - postgres
                - "-c"
                - "/usr/lib/postgresql/18/bin/pg_isready"
            initialDelaySeconds: 60
            periodSeconds: 30
            timeoutSeconds: 5
  volumeClaimTemplates:
    - metadata:
        name: pg-data
      spec:
        storageClassName: local-path
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 20Gi

View File

@@ -0,0 +1,22 @@
# Placeholder for SOPS-encrypted secrets
# Actual secrets will be encrypted with: sops --encrypt --age <admin-key>,<dev-key>
#
# Required secrets (create as test-env-secrets):
# pg-password: password for PostgreSQL usr1cv8 user
#
# Required secrets (create as test-env-runner-token):
# token: Gitea Actions runner registration token
#
# Example (before encryption):
# apiVersion: v1
# kind: Secret
# metadata:
# name: test-env-secrets
# namespace: test-env
# type: Opaque
# stringData:
# pg-password: "usr1cv8"
#
# For now, create secrets manually in the cluster:
# kubectl -n test-env create secret generic test-env-secrets --from-literal=pg-password=usr1cv8
# kubectl -n test-env create secret generic test-env-runner-token --from-literal=token=<TOKEN>