diff --git a/Dockerfile b/Dockerfile index c8b59a2..0a34ea9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,18 +1,30 @@ - -# Download go-cron ARG ALPINE_VERSION=3.21 FROM alpine:${ALPINE_VERSION} +WORKDIR / + +# Install tools for PostgreSQL, MariaDB, and AWS CLI RUN apk update && \ apk add --no-cache \ gnupg \ aws-cli \ postgresql-client \ - mysql-client mariadb-connector-c + mysql-client mariadb-connector-c \ + curl + +# Install MSSQL tools (sqlcmd) for Microsoft SQL Server on Alpine +# Source: https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-setup-tools +RUN curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_18.1.1.1-1_amd64.apk && \ + curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_18.1.1.1-1_amd64.apk && \ + apk add --allow-untrusted msodbcsql18_18.1.1.1-1_amd64.apk && \ + apk add --allow-untrusted mssql-tools18_18.1.1.1-1_amd64.apk && \ + rm msodbcsql18_18.1.1.1-1_amd64.apk mssql-tools18_18.1.1.1-1_amd64.apk RUN rm -rf /var/cache/apk/* +ENV PATH="${PATH}:/opt/mssql-tools18/bin" + ENV DATABASE_NAME '' ENV DATABASE_HOST '' ENV DATABASE_PORT '' @@ -22,6 +34,8 @@ ENV DATABASE_PASSWORD '' ENV PGDUMP_EXTRA_OPTS '' ENV MARIADB_DUMP_EXTRA_OPTS '' ENV MARIADB_EXTRA_OPTS '' +ENV MSSQL_EXTRA_OPTS '' +ENV MSSQL_DATA_DIR '/var/opt/mssql/data' ENV S3_ACCESS_KEY_ID '' ENV S3_SECRET_ACCESS_KEY '' ENV S3_BUCKET '' diff --git a/README.md b/README.md index ea88a2f..f9975ba 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,11 @@ # Introduction This project provides Docker images to periodically back up a database to AWS S3, and to restore from the backup as needed. +Supported databases: +- PostgreSQL +- MariaDB/MySQL +- Microsoft SQL Server (MSSQL) + # Usage ## Backup ```yaml @@ -26,9 +31,50 @@ services: DATABASE_NAME: dbname DATABASE_USER: user DATABASE_PASSWORD: password - DATABASE_SERVER: postgres + DATABASE_SERVER: postgres # postgres, mariadb, or mssql ``` +### MSSQL Example +**Note:** MSSQL backups use `sqlcmd` with the native `BACKUP DATABASE` command, which writes backup files server-side. This requires a shared volume between the MSSQL and backup containers. + +```yaml +services: + mssql: + image: mcr.microsoft.com/mssql/server:2022-latest + platform: linux/amd64 # Required for Apple Silicon Macs + environment: + ACCEPT_EULA: Y + MSSQL_SA_PASSWORD: YourStrong@Passw0rd + MSSQL_PID: Express + volumes: + - mssql-data:/var/opt/mssql # Shared volume + + backup: + image: reg.dev.krd/db-backup-s3/db-backup-s3:alpine-3.21 + platform: linux/amd64 # Required for Apple Silicon Macs + volumes: + - mssql-data:/var/opt/mssql # Shared volume with MSSQL + environment: + SCHEDULE: '@daily' + S3_REGION: us-east-1 + S3_ACCESS_KEY_ID: your_key + S3_SECRET_ACCESS_KEY: your_secret + S3_BUCKET: my-bucket + S3_PREFIX: mssql-backup + DATABASE_HOST: mssql + DATABASE_PORT: 1433 + DATABASE_NAME: MyDatabase + DATABASE_USER: sa + DATABASE_PASSWORD: YourStrong@Passw0rd + DATABASE_SERVER: mssql + MSSQL_DATA_DIR: /var/opt/mssql/data # MSSQL data directory (where backups are temporarily stored) + +volumes: + mssql-data: # Shared volume for MSSQL data and backups +``` + +See [`docker-compose.yaml`](./docker-compose.yaml) for a complete working example. + - Images are tagged by the major PostgreSQL version supported: `11`, `12`, `13`, `14`, or `15`. - The `SCHEDULE` variable determines backup frequency. See go-cron schedules documentation [here](http://godoc.org/github.com/robfig/cron#hdr-Predefined_schedules). 
Omit to run the backup immediately and then exit. - If `PASSPHRASE` is provided, the backup will be encrypted using GPG. @@ -36,6 +82,217 @@ services: - If `BACKUP_KEEP_DAYS` is set, backups older than this many days will be deleted from S3. - Set `S3_ENDPOINT` if you're using a non-AWS S3-compatible storage provider. +## Kubernetes Examples + +### Standard CronJob (PostgreSQL, MariaDB) +PostgreSQL and MariaDB can use a standard Kubernetes CronJob since they use client-side backup tools (`pg_dump`, `mariadb-dump`) that don't require shared volumes: + +```yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: postgres-backup +spec: + schedule: "0 2 * * *" # Daily at 2 AM + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: backup + image: reg.dev.krd/db-backup-s3/db-backup-s3:alpine-3.21 + env: + - name: DATABASE_SERVER + value: "postgres" + - name: DATABASE_HOST + value: "postgres-service" + - name: DATABASE_PORT + value: "5432" + - name: DATABASE_NAME + valueFrom: + secretKeyRef: + name: db-credentials + key: database + - name: DATABASE_USER + valueFrom: + secretKeyRef: + name: db-credentials + key: username + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: db-credentials + key: password + - name: S3_REGION + value: "us-east-1" + - name: S3_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: s3-credentials + key: access-key-id + - name: S3_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: s3-credentials + key: secret-access-key + - name: S3_BUCKET + value: "my-backups" + - name: S3_PREFIX + value: "postgres-backups" + - name: BACKUP_KEEP_DAYS + value: "7" +``` + +### MSSQL CronJob Example + +> **Note:** For MSSQL StatefulSets with `ReadWriteOnce` volumes, use the [sidecar pattern](#mssql-with-statefulset-sidecar-pattern) instead. This CronJob example only works if you have a `ReadWriteMany` volume or a separate network-accessible MSSQL instance. 
+ +```yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: mssql-backup +spec: + schedule: "0 2 * * *" # Daily at 2 AM + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - name: backup + image: reg.dev.krd/db-backup-s3/db-backup-s3:alpine-3.21 + env: + - name: DATABASE_SERVER + value: "mssql" + - name: DATABASE_HOST + value: "mssql-service" + - name: DATABASE_PORT + value: "1433" + - name: DATABASE_NAME + valueFrom: + secretKeyRef: + name: db-credentials + key: database + - name: DATABASE_USER + valueFrom: + secretKeyRef: + name: db-credentials + key: username + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: db-credentials + key: password + - name: MSSQL_DATA_DIR + value: "/var/opt/mssql/data" + - name: S3_REGION + value: "us-east-1" + - name: S3_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: s3-credentials + key: access-key-id + - name: S3_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: s3-credentials + key: secret-access-key + - name: S3_BUCKET + value: "my-backups" + - name: S3_PREFIX + value: "mssql-backups" + - name: BACKUP_KEEP_DAYS + value: "7" + volumeMounts: + - name: mssql-data + mountPath: /var/opt/mssql/data + volumes: + - name: mssql-data + persistentVolumeClaim: + claimName: mssql-data-pvc # Must be ReadWriteMany for CronJob +``` + +**Manual Backup Trigger:** +```bash +# Create a one-off job from the CronJob +kubectl create job --from=cronjob/mssql-backup manual-backup-$(date +%Y%m%d-%H%M%S) +``` + +### MSSQL with StatefulSet (Sidecar Pattern) + +For MSSQL StatefulSets with `ReadWriteOnce` volumes, use the **sidecar pattern** instead of a CronJob. This allows the backup container to share the same volume as the database container, which is required for `sqlcmd`'s native `BACKUP DATABASE` command. + +**Why Sidecar for MSSQL?** +- `ReadWriteOnce` volumes can only be mounted by one pod at a time +- MSSQL's `BACKUP DATABASE` writes files server-side to `/var/opt/mssql/data` +- A sidecar container in the same pod can access the same volume +- No need for complex volume mounting or client-side backup tools + +**Example: StatefulSet with Backup Sidecar** + +See [`tests/k8s-statefulset-with-sidecar.yaml`](./tests/k8s-statefulset-with-sidecar.yaml) for a complete example. + +```yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: mssql +spec: + # ... (your existing StatefulSet config) + template: + spec: + containers: + # MSSQL Container + - name: mssql + image: mcr.microsoft.com/mssql/server:2022-CU14-ubuntu-22.04 + # ... (your existing MSSQL config) + volumeMounts: + - mountPath: /var/opt/mssql/data + name: data + + # Backup Sidecar Container + - name: backup + image: ghcr.io/your-org/db-backup-s3:latest + env: + - name: SCHEDULE + value: "0 2 * * *" # Daily at 2 AM + - name: DATABASE_SERVER + value: "mssql" + - name: DATABASE_HOST + value: "localhost" # Same pod + - name: DATABASE_PORT + value: "1433" + - name: MSSQL_DATA_DIR + value: "/var/opt/mssql/data" + # ... 
(S3 and DB credentials from secrets) + volumeMounts: + - mountPath: /var/opt/mssql/data + name: data # Shared with MSSQL container +``` + +**Key Configuration:** +- `DATABASE_HOST: "localhost"` - Both containers are in the same pod +- `MSSQL_DATA_DIR: "/var/opt/mssql/data"` - MSSQL's data directory (where backup files are temporarily stored) +- Both containers mount the same volume at `/var/opt/mssql/data` +- Set `SCHEDULE` env var for automated backups (cron format) + +**Trigger Manual Backup:** +```bash +# Execute backup in the sidecar container +kubectl exec -it mssql-0 -c backup -- sh backup.sh +``` + +**Restore from Backup:** +```bash +# Restore latest backup +kubectl exec -it mssql-0 -c backup -- sh restore.sh + +# Restore specific backup by timestamp +kubectl exec -it mssql-0 -c backup -- sh restore.sh 2025-10-22T14:05:00 +``` + ## Restore > **WARNING:** DATA LOSS! All database objects will be dropped and re-created. ### ... from latest backup @@ -61,6 +318,29 @@ cp template.env .env docker compose up -d ``` +## Test Scripts + +### Docker Compose +```sh +# Test MSSQL backup/restore with Docker Compose +./tests/test-mssql.sh +``` + +### Kubernetes (Recommended - Everything in One Namespace) +```sh +# Complete automated test with local MinIO +# Creates mssql-backup-test namespace with BOTH MinIO and MSSQL +./tests/test-mssql-k8s-with-minio.sh + +# Manual test (if you already have S3/MinIO elsewhere) +NAMESPACE=mssql-backup-test S3_ENDPOINT=http://your-s3 ./tests/test-mssql-k8s.sh + +# Clean up (removes everything - one command!) +kubectl delete namespace mssql-backup-test +``` + +**Architecture:** MinIO and MSSQL run in the same namespace for simplified networking and easy cleanup. + # Acknowledgements This project is a fork and re-structuring @eeshugerman's fork of @schickling's [postgres-backup-s3](https://github.com/schickling/dockerfiles/tree/master/postgres-backup-s3) and [postgres-restore-s3](https://github.com/schickling/dockerfiles/tree/master/postgres-restore-s3). diff --git a/docker-compose.yaml b/docker-compose.yaml index 67ea50b..b533a8a 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -16,21 +16,35 @@ services: MYSQL_DATABASE: database MYSQL_ROOT_PASSWORD: root_password + mssql: + platform: linux/amd64 + image: mcr.microsoft.com/mssql/server:2022-latest + environment: + ACCEPT_EULA: Y + MSSQL_SA_PASSWORD: YourStrong@Passw0rd + MSSQL_PID: Express + ports: + - "11433:1433" # Use port 11433 externally to avoid conflicts + volumes: + - mssql-data:/var/opt/mssql # Database storage and backup location (shared with backup container) + minio: - image: bitnami/minio + image: minio/minio:latest + command: server /data --console-address ":9001" ports: - 9000:9000 - 9001:9001 environment: MINIO_ROOT_USER: miniouser MINIO_ROOT_PASSWORD: minioroot - MINIO_DEFAULT_BUCKETS: backups + volumes: + - minio-data:/data backup-postgres: build: context: . args: - ALPINE_VERSION: '3.21' + ALPINE_VERSION: "3.21" environment: # SCHEDULE: '@weekly' # optional BACKUP_KEEP_DAYS: 7 # optional @@ -52,7 +66,7 @@ services: build: context: . args: - ALPINE_VERSION: '3.21' + ALPINE_VERSION: "3.21" environment: # SCHEDULE: '@weekly' # optional BACKUP_KEEP_DAYS: 7 # optional @@ -69,3 +83,32 @@ services: DATABASE_PORT: 3306 DATABASE_SERVER: mysql DATABASE_PASSWORD: root_password + + backup-mssql: + platform: linux/amd64 + build: + context: . 
+ args: + ALPINE_VERSION: "3.21" + volumes: + - mssql-data:/var/opt/mssql # Shared volume for native BACKUP DATABASE command + environment: + # SCHEDULE: '@weekly' # optional + BACKUP_KEEP_DAYS: 7 # optional + # PASSPHRASE: passphrase # optional - uncomment to enable GPG encryption + # S3_REGION: + S3_ENDPOINT: http://minio:9000 + S3_ACCESS_KEY_ID: miniouser + S3_SECRET_ACCESS_KEY: minioroot + S3_BUCKET: backups + S3_PREFIX: mssql-backups + DATABASE_HOST: mssql + DATABASE_NAME: TestDB + DATABASE_USER: sa + DATABASE_PORT: 1433 + DATABASE_SERVER: mssql + DATABASE_PASSWORD: YourStrong@Passw0rd + MSSQL_DATA_DIR: /var/opt/mssql/data # Path where MSSQL backups are stored (must match volume mount) +volumes: + mssql-data: # MSSQL database storage + minio-data: # MinIO object storage diff --git a/src/backup.sh b/src/backup.sh index 9657cf0..f3c8d2d 100644 --- a/src/backup.sh +++ b/src/backup.sh @@ -10,16 +10,23 @@ echo "Creating backup of $DATABASE_NAME database..." backup timestamp=$(date +"%Y-%m-%dT%H:%M:%S") -s3_uri_base="s3://${S3_BUCKET}/${S3_PREFIX}/${DATABASE_NAME}_${timestamp}.dump" + +# MSSQL uses .bak extension, other databases use .dump +if [ "$DATABASE_SERVER" = "mssql" ]; then + local_file="${MSSQL_DATA_DIR}/db.bak" + s3_uri_base="s3://${S3_BUCKET}/${S3_PREFIX}/${DATABASE_NAME}_${timestamp}.bak" +else + local_file="db.dump" + s3_uri_base="s3://${S3_BUCKET}/${S3_PREFIX}/${DATABASE_NAME}_${timestamp}.dump" +fi if [ -n "$PASSPHRASE" ]; then echo "Encrypting backup..." - gpg --symmetric --batch --passphrase "$PASSPHRASE" db.dump - rm db.dump - local_file="db.dump.gpg" + gpg --symmetric --batch --passphrase "$PASSPHRASE" "$local_file" + rm "$local_file" + local_file="${local_file}.gpg" s3_uri="${s3_uri_base}.gpg" else - local_file="db.dump" s3_uri="$s3_uri_base" fi diff --git a/src/env.sh b/src/env.sh index 414227d..77a6562 100644 --- a/src/env.sh +++ b/src/env.sh @@ -4,7 +4,7 @@ if [ -z "$S3_BUCKET" ]; then fi if [ -z "$DATABASE_SERVER" ]; then - echo "You need to set the DATABASE_SERVER environment variable. (postgres, mariadb)" + echo "You need to set the DATABASE_SERVER environment variable. (postgres, mariadb, mssql)" exit 1 fi diff --git a/src/helpers.sh b/src/helpers.sh index 53167c7..844c10b 100644 --- a/src/helpers.sh +++ b/src/helpers.sh @@ -6,6 +6,8 @@ backup() { backup_postgres elif [[ "$DATABASE_SERVER" == "mariadb" ]]; then backup_mariadb + elif [[ "$DATABASE_SERVER" == "mssql" ]]; then + backup_mssql else echo "Unknown database server: $DATABASE_SERVER" exit 1 @@ -17,6 +19,8 @@ restore() { restore_postgres elif [[ "$DATABASE_SERVER" == "mariadb" ]]; then restore_mariadb + elif [[ "$DATABASE_SERVER" == "mssql" ]]; then + restore_mssql else echo "Unknown database server: $DATABASE_SERVER" exit 1 @@ -55,3 +59,40 @@ restore_mariadb() { --password="$DATABASE_PASSWORD" "$MARIADB_EXTRA_OPTS" \ $DATABASE_NAME < db.dump } + +backup_mssql() { + # Use native BACKUP DATABASE command + # Note: Requires shared volume mounted at MSSQL_DATA_DIR + sqlcmd -S ${DATABASE_HOST},${DATABASE_PORT} \ + -U ${DATABASE_USER} \ + -P "${DATABASE_PASSWORD}" \ + -C \ + -Q "BACKUP DATABASE [${DATABASE_NAME}] TO DISK = N'${MSSQL_DATA_DIR}/db.bak' WITH INIT;" \ + $MSSQL_EXTRA_OPTS +} + +restore_mssql() { + echo "Restoring from backup..." 
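+  # Note: the parsing below assumes the backup set contains exactly one data
+  # file and one log file (the default layout for a simple database).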
+ # Get logical file names from the backup + logical_files=$(sqlcmd -S ${DATABASE_HOST},${DATABASE_PORT} \ + -U ${DATABASE_USER} \ + -P "${DATABASE_PASSWORD}" \ + -C -W \ + -Q "SET NOCOUNT ON; RESTORE FILELISTONLY FROM DISK = N'${MSSQL_DATA_DIR}/db.bak';" \ + | grep -v '^$' | awk '{print $1}' | tail -n +3) + + # Parse logical names (first two lines after headers) + data_file=$(echo "$logical_files" | sed -n '1p') + log_file=$(echo "$logical_files" | sed -n '2p') + + # Restore database with MOVE options + sqlcmd -S ${DATABASE_HOST},${DATABASE_PORT} \ + -U ${DATABASE_USER} \ + -P "${DATABASE_PASSWORD}" \ + -C \ + -Q "RESTORE DATABASE [${DATABASE_NAME}] FROM DISK = N'${MSSQL_DATA_DIR}/db.bak' WITH REPLACE, MOVE N'${data_file}' TO N'${MSSQL_DATA_DIR}/${DATABASE_NAME}.mdf', MOVE N'${log_file}' TO N'${MSSQL_DATA_DIR}/${DATABASE_NAME}_log.ldf';" \ + $MSSQL_EXTRA_OPTS + + # Clean up backup file + rm "${MSSQL_DATA_DIR}/db.bak" +} diff --git a/src/restore.sh b/src/restore.sh index 1745731..4747013 100644 --- a/src/restore.sh +++ b/src/restore.sh @@ -8,10 +8,21 @@ source ./helpers.sh s3_uri_base="s3://${S3_BUCKET}/${S3_PREFIX}" -if [ -z "$PASSPHRASE" ]; then - file_type=".dump" +# MSSQL uses .bak extension, other databases use .dump +if [ "$DATABASE_SERVER" = "mssql" ]; then + backup_file="${MSSQL_DATA_DIR}/db.bak" + if [ -z "$PASSPHRASE" ]; then + file_type=".bak" + else + file_type=".bak.gpg" + fi else - file_type=".dump.gpg" + backup_file="db.dump" + if [ -z "$PASSPHRASE" ]; then + file_type=".dump" + else + file_type=".dump.gpg" + fi fi if [ $# -eq 1 ]; then @@ -28,16 +39,22 @@ else fi echo "Fetching backup from S3..." -aws $aws_args s3 cp "${s3_uri_base}/${key_suffix}" "db${file_type}" - if [ -n "$PASSPHRASE" ]; then + aws $aws_args s3 cp "${s3_uri_base}/${key_suffix}" "${backup_file}.gpg" echo "Decrypting backup..." - gpg --decrypt --batch --passphrase "$PASSPHRASE" db.dump.gpg > db.dump - rm db.dump.gpg + gpg --decrypt --batch --passphrase "$PASSPHRASE" "${backup_file}.gpg" > "${backup_file}" + rm "${backup_file}.gpg" +else + aws $aws_args s3 cp "${s3_uri_base}/${key_suffix}" "${backup_file}" fi echo "Restoring from backup..." restore -rm db.dump + +# Clean up backup file +# Note: For MSSQL, the file is in MSSQL_DATA_DIR and cleanup happens in restore_mssql() +if [ "$DATABASE_SERVER" != "mssql" ]; then + rm "${backup_file}" +fi echo "Restore complete." diff --git a/src/run.sh b/src/run.sh index dfb1d65..a80f9cd 100644 --- a/src/run.sh +++ b/src/run.sh @@ -11,8 +11,22 @@ fi if [ -z "$SCHEDULE" ]; then sh backup.sh else - # Use crond from busybox which is available in Alpine - echo "$SCHEDULE /bin/sh $(pwd)/backup.sh" > /etc/crontabs/root - # Start crond in foreground mode - exec crond -f -d 8 + # For non-root users, use a writable directory for crontabs + # busybox crond supports -c option to specify crontab directory + CRON_USER=$(id -u) + CRON_DIR="${HOME}/crontabs" + + # Create crontab directory + mkdir -p "$CRON_DIR" + + # Write crontab entry + echo "$SCHEDULE /bin/sh $(pwd)/backup.sh" > "$CRON_DIR/$CRON_USER" + chmod 600 "$CRON_DIR/$CRON_USER" + + echo "Backup schedule configured: $SCHEDULE" + echo "Crontab file: $CRON_DIR/$CRON_USER" + echo "Starting crond..." 
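+  # The entry written above has the form "<SCHEDULE> /bin/sh <working dir>/backup.sh",
+  # with the working directory expanded via $(pwd) when the container starts.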
+ + # Start crond in foreground mode with custom crontab directory + exec crond -f -d 8 -c "$CRON_DIR" fi diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000..8fc75b8 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,56 @@ +# Test Scripts + +This directory contains automated test scripts for validating the backup and restore functionality across different environments. + +## Files + +### Docker Compose Tests +- **`test-mssql.sh`**: Tests MSSQL backup/restore using Docker Compose with local MinIO + +### Kubernetes Tests +- **`test-mssql-k8s.sh`**: Tests MSSQL StatefulSet with backup sidecar (requires existing S3/MinIO) +- **`test-mssql-k8s-with-minio.sh`**: Complete automated test that deploys MinIO alongside MSSQL +- **`setup-minio-k8s.sh`**: Helper script to deploy MinIO in Kubernetes + +### Kubernetes Configuration Examples +- **`k8s-statefulset-with-sidecar.yaml`**: Production-ready MSSQL StatefulSet with backup sidecar +- **`k8s-statefulset-test.yaml`**: Test StatefulSet configuration used by automated test scripts +- **`k8s-mssql-configmap-example.yaml`**: Example ConfigMap for non-sensitive configuration +- **`k8s-mssql-secret-example.yaml`**: Example Secret for sensitive credentials + +## Usage + +### Quick Start (Recommended) +Run the complete automated test with MinIO: +```bash +./tests/test-mssql-k8s-with-minio.sh +``` + +This will: +- Create a test namespace (`mssql-backup-test`) +- Deploy MinIO +- Deploy MSSQL StatefulSet with backup sidecar +- Run backup and restore tests +- Verify encryption is working + +### Manual Kubernetes Test +If you have an existing S3 endpoint: +```bash +NAMESPACE=mssql-backup-test \ +S3_ENDPOINT=http://your-s3:9000 \ +S3_ACCESS_KEY_ID=your-key \ +S3_SECRET_ACCESS_KEY=your-secret \ +./tests/test-mssql-k8s.sh +``` + +### Docker Compose Test +```bash +./tests/test-mssql.sh +``` + +## Cleanup +Delete the test namespace to remove all resources: +```bash +kubectl delete namespace mssql-backup-test +``` + diff --git a/tests/k8s-mssql-configmap-example.yaml b/tests/k8s-mssql-configmap-example.yaml new file mode 100644 index 0000000..dffe144 --- /dev/null +++ b/tests/k8s-mssql-configmap-example.yaml @@ -0,0 +1,98 @@ +# Example mssql-config ConfigMap +# This ConfigMap contains non-sensitive configuration for MSSQL and backup sidecar +# +# Usage: +# 1. Copy this file and update with your actual values +# 2. Apply: kubectl apply -f tests/k8s-mssql-configmap.yaml +# 3. Create secret: kubectl apply -f tests/k8s-mssql-secret.yaml +# 4. 
Deploy: kubectl apply -f tests/k8s-statefulset-with-sidecar.yaml + +apiVersion: v1 +kind: ConfigMap +metadata: + name: mssql-config + namespace: default # Update with your namespace +data: + # ============================================ + # Database Configuration + # ============================================ + DATABASE_SERVER: "mssql" + DATABASE_HOST: "mssql-service" # MSSQL hostname (overridden to "localhost" in sidecar pattern) + DATABASE_NAME: "MyDatabase" # The database to backup + DATABASE_PORT: "1433" + + # ============================================ + # MSSQL Specific Configuration + # ============================================ + MSSQL_DATA_DIR: "/var/opt/mssql/data" + # MSSQL_EXTRA_OPTS: "" # Additional sqlcmd options if needed + + # ============================================ + # Backup Schedule and Retention + # ============================================ + SCHEDULE: "0 2 * * *" # Daily at 2 AM (cron format) + BACKUP_KEEP_DAYS: "7" # Keep backups for 7 days + + # Cron schedule examples: + # "@daily" - Once per day at midnight + # "@weekly" - Once per week at midnight Sunday + # "@hourly" - Once per hour + # "0 */6 * * *" - Every 6 hours + # "0 2 * * *" - Every day at 2 AM + # "0 3 * * 0" - Every Sunday at 3 AM + # "0 0 1 * *" - First day of every month at midnight + + # ============================================ + # AWS S3 Configuration + # ============================================ + S3_BUCKET: "my-database-backups" + S3_PREFIX: "mssql-backups" + S3_REGION: "us-east-1" + + # ============================================ + # Optional: S3-Compatible Storage (MinIO, Wasabi, etc.) + # ============================================ + # Uncomment and configure if using non-AWS S3-compatible storage + S3_ENDPOINT: "https://s3.example.com" # Your S3-compatible endpoint + S3_S3V4: "yes" # Use Signature Version 4 + + # Common S3-compatible endpoints: + # MinIO: "https://minio.example.com" + # Wasabi: "https://s3.wasabisys.com" + # DigitalOcean: "https://nyc3.digitaloceanspaces.com" + # Backblaze B2: "https://s3.us-west-001.backblazeb2.com" + +--- +# Notes: +# +# 1. ConfigMap vs Secret: +# - ConfigMap: Non-sensitive configuration (endpoints, names, schedules) +# - Secret: Sensitive data (passwords, access keys) +# +# 2. To update ConfigMap after deployment: +# kubectl apply -f k8s-mssql-configmap.yaml +# kubectl rollout restart statefulset/mssql +# +# 3. To view the ConfigMap: +# kubectl get configmap mssql-config -o yaml +# +# 4. DATABASE_HOST behavior: +# - In sidecar pattern: Set to "localhost" (overridden in StatefulSet) +# - In CronJob pattern: Use service name like "mssql-service" +# The ConfigMap default is for CronJob; sidecar overrides it in the pod spec. +# +# 5. 
Alternative: Create from command line: +# kubectl create configmap mssql-config \ +# --from-literal=DATABASE_SERVER='mssql' \ +# --from-literal=DATABASE_HOST='mssql-service' \ +# --from-literal=DATABASE_NAME='MyDatabase' \ +# --from-literal=DATABASE_PORT='1433' \ +# --from-literal=MSSQL_DATA_DIR='/var/opt/mssql/data' \ +# --from-literal=SCHEDULE='0 2 * * *' \ +# --from-literal=BACKUP_KEEP_DAYS='7' \ +# --from-literal=S3_BUCKET='my-backups' \ +# --from-literal=S3_PREFIX='mssql-backups' \ +# --from-literal=S3_REGION='us-east-1' \ +# --from-literal=S3_ENDPOINT='https://s3.example.com' \ +# --from-literal=S3_S3V4='yes' + diff --git a/tests/k8s-mssql-secret-example.yaml b/tests/k8s-mssql-secret-example.yaml new file mode 100644 index 0000000..c4ef53d --- /dev/null +++ b/tests/k8s-mssql-secret-example.yaml @@ -0,0 +1,72 @@ +# Example mssql-general Secret +# This secret contains ONLY sensitive credentials (passwords, keys) +# Non-sensitive configuration is in the ConfigMap (tests/k8s-mssql-configmap-example.yaml) +# +# Usage: +# 1. Copy this file and update with your actual values +# 2. Apply ConfigMap: kubectl apply -f tests/k8s-mssql-configmap.yaml +# 3. Apply Secret: kubectl apply -f tests/k8s-mssql-secret.yaml +# 4. Deploy: kubectl apply -f tests/k8s-statefulset-with-sidecar.yaml + +apiVersion: v1 +kind: Secret +metadata: + name: mssql-general + namespace: default # Update with your namespace +type: Opaque +stringData: + # ============================================ + # MSSQL Server Credentials + # ============================================ + MSSQL_SA_PASSWORD: "YourStrong@Passw0rd" # Must meet SQL Server complexity requirements + + # ============================================ + # Database Backup Credentials + # ============================================ + DATABASE_USER: "sa" + DATABASE_PASSWORD: "YourStrong@Passw0rd" # Same as MSSQL_SA_PASSWORD + + # ============================================ + # AWS S3 Credentials + # ============================================ + S3_ACCESS_KEY_ID: "AKIAIOSFODNN7EXAMPLE" + S3_SECRET_ACCESS_KEY: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + + # ============================================ + # Optional: GPG Encryption Passphrase + # ============================================ + # Uncomment to enable encryption of backup files before upload + # PASSPHRASE: "my-super-secret-gpg-passphrase" + +--- +# Notes: +# +# 1. Password Requirements: +# - MSSQL_SA_PASSWORD must meet SQL Server complexity requirements: +# * At least 8 characters +# * Mix of uppercase, lowercase, digits, and symbols +# * Example: "MyP@ssw0rd123" +# +# 2. For production, consider using: +# - SealedSecrets: https://github.com/bitnami-labs/sealed-secrets +# - External Secrets Operator: https://external-secrets.io/ +# - AWS Secrets Manager, Azure Key Vault, or HashiCorp Vault +# +# 3. Non-sensitive configuration (bucket, endpoints, schedule): +# These are now in the ConfigMap (tests/k8s-mssql-configmap-example.yaml) +# This keeps secrets clean and allows easier configuration changes +# +# 4. Alternative: Create secret from command line: +# kubectl create secret generic mssql-general \ +# --from-literal=MSSQL_SA_PASSWORD='YourStrong@Passw0rd' \ +# --from-literal=DATABASE_USER='sa' \ +# --from-literal=DATABASE_PASSWORD='YourStrong@Passw0rd' \ +# --from-literal=S3_ACCESS_KEY_ID='YOUR_KEY' \ +# --from-literal=S3_SECRET_ACCESS_KEY='YOUR_SECRET' +# +# 4. To view the secret (base64 encoded): +# kubectl get secret mssql-general -o yaml +# +# 5. 
To decode a specific key: +# kubectl get secret mssql-general -o jsonpath='{.data.DATABASE_NAME}' | base64 -d + diff --git a/tests/k8s-statefulset-test.yaml b/tests/k8s-statefulset-test.yaml new file mode 100644 index 0000000..28df03a --- /dev/null +++ b/tests/k8s-statefulset-test.yaml @@ -0,0 +1,138 @@ +# MSSQL StatefulSet with Backup Sidecar +# +# This configuration runs a backup container as a sidecar alongside MSSQL Server. +# Both containers share the same volume, allowing the backup container to access +# MSSQL's native backup files. +# +# Prerequisites: +# 1. Create the ConfigMap: kubectl apply -f tests/k8s-mssql-configmap-example.yaml +# 2. Create the Secret: kubectl apply -f tests/k8s-mssql-secret-example.yaml +# 3. Apply this StatefulSet: kubectl apply -f tests/k8s-statefulset-test.yaml +# +# The backup container will automatically run backups according to the SCHEDULE. +# +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: mssql +spec: + replicas: 1 + selector: + matchLabels: + app: mssql + serviceName: mssql + template: + metadata: + labels: + app: mssql + spec: + containers: + # MSSQL Server Container + - name: mssql + image: mcr.microsoft.com/mssql/server:2022-CU14-ubuntu-22.04 + ports: + - containerPort: 1433 + env: + - name: ACCEPT_EULA + value: "Y" + - name: MSSQL_PID + value: Express + envFrom: + - secretRef: + name: mssql-general + resources: + limits: + memory: 4Gi + requests: + cpu: 100m + memory: 4Gi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + volumeMounts: + - mountPath: /var/opt/mssql/data + name: data + + # Backup Sidecar Container + - name: backup + image: reg.dev.krd/db-backup-s3/db-backup-s3:test + imagePullPolicy: Always # Update with your image + # Load configuration from ConfigMap and Secret + envFrom: + - configMapRef: + name: mssql-config # Non-sensitive config (schedule, bucket, endpoint) + - secretRef: + name: mssql-general # Sensitive credentials (passwords, keys) + + # Override specific values after loading from ConfigMap/Secret + env: + # Override DATABASE_HOST from ConfigMap since we're in the same pod + - name: DATABASE_HOST + value: "localhost" # Sidecar uses localhost; ConfigMap default is for CronJob pattern + # Set HOME to writable location for AWS CLI + - name: HOME + value: "/tmp" + + resources: + limits: + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false # Needs write access for temp backup files + volumeMounts: + - mountPath: /var/opt/mssql/data + name: data + + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 4Gi + +--- +# Configuration Structure +# +# This StatefulSet uses a ConfigMap for non-sensitive config and a Secret for credentials: +# +# ConfigMap (mssql-config) - See tests/k8s-mssql-configmap-example.yaml +# - DATABASE_SERVER, DATABASE_HOST, DATABASE_NAME, DATABASE_PORT +# - MSSQL_DATA_DIR, MSSQL_EXTRA_OPTS +# - SCHEDULE, BACKUP_KEEP_DAYS +# - S3_BUCKET, S3_PREFIX, S3_REGION, S3_ENDPOINT, S3_S3V4 +# Note: DATABASE_HOST is overridden to "localhost" in the StatefulSet for sidecar pattern +# +# Secret (mssql-general) - See tests/k8s-mssql-secret-example.yaml +# - MSSQL_SA_PASSWORD +# - DATABASE_USER, DATABASE_PASSWORD +# - 
S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY +# - PASSPHRASE (optional, for GPG encryption) +# +# Benefits of separating ConfigMap and Secret: +# - Easier to update non-sensitive configuration +# - Better security practices (minimal secret exposure) +# - ConfigMap changes don't require secret rotation +# - Can use different RBAC policies for each + diff --git a/tests/k8s-statefulset-with-sidecar.yaml b/tests/k8s-statefulset-with-sidecar.yaml new file mode 100644 index 0000000..b3475b3 --- /dev/null +++ b/tests/k8s-statefulset-with-sidecar.yaml @@ -0,0 +1,137 @@ +# MSSQL StatefulSet with Backup Sidecar +# +# This configuration runs a backup container as a sidecar alongside MSSQL Server. +# Both containers share the same volume, allowing the backup container to access +# MSSQL's native backup files. +# +# Prerequisites: +# 1. Create the ConfigMap: kubectl apply -f tests/k8s-mssql-configmap-example.yaml +# 2. Create the Secret: kubectl apply -f tests/k8s-mssql-secret-example.yaml +# 3. Apply this StatefulSet: kubectl apply -f tests/k8s-statefulset-with-sidecar.yaml +# +# The backup container will automatically run backups according to the SCHEDULE. +# +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: mssql +spec: + replicas: 1 + selector: + matchLabels: + app: mssql + serviceName: mssql + template: + metadata: + labels: + app: mssql + spec: + containers: + # MSSQL Server Container + - name: mssql + image: mcr.microsoft.com/mssql/server:2022-CU14-ubuntu-22.04 + ports: + - containerPort: 1433 + env: + - name: ACCEPT_EULA + value: "Y" + - name: MSSQL_PID + value: Express + envFrom: + - secretRef: + name: mssql-general + resources: + limits: + memory: 4Gi + requests: + cpu: 100m + memory: 4Gi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + volumeMounts: + - mountPath: /var/opt/mssql/data + name: data + + # Backup Sidecar Container + - name: backup + image: ghcr.io/your-org/db-backup-s3:latest # Update with your image + # Load configuration from ConfigMap and Secret + envFrom: + - configMapRef: + name: mssql-config # Non-sensitive config (schedule, bucket, endpoint) + - secretRef: + name: mssql-general # Sensitive credentials (passwords, keys) + + # Override specific values after loading from ConfigMap/Secret + env: + # Override DATABASE_HOST from ConfigMap since we're in the same pod + - name: DATABASE_HOST + value: "localhost" # Sidecar uses localhost; ConfigMap default is for CronJob pattern + # Set HOME to writable location for AWS CLI + - name: HOME + value: "/tmp" + + resources: + limits: + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false # Needs write access for temp backup files + volumeMounts: + - mountPath: /var/opt/mssql/data + name: data + + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 4Gi + +--- +# Configuration Structure +# +# This StatefulSet uses a ConfigMap for non-sensitive config and a Secret for credentials: +# +# ConfigMap (mssql-config) - See tests/k8s-mssql-configmap-example.yaml +# - DATABASE_SERVER, DATABASE_HOST, DATABASE_NAME, DATABASE_PORT +# - MSSQL_DATA_DIR, MSSQL_EXTRA_OPTS +# - SCHEDULE, BACKUP_KEEP_DAYS +# - S3_BUCKET, S3_PREFIX, S3_REGION, S3_ENDPOINT, 
S3_S3V4 +# Note: DATABASE_HOST is overridden to "localhost" in the StatefulSet for sidecar pattern +# +# Secret (mssql-general) - See tests/k8s-mssql-secret-example.yaml +# - MSSQL_SA_PASSWORD +# - DATABASE_USER, DATABASE_PASSWORD +# - S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY +# - PASSPHRASE (optional, for GPG encryption) +# +# Benefits of separating ConfigMap and Secret: +# - Easier to update non-sensitive configuration +# - Better security practices (minimal secret exposure) +# - ConfigMap changes don't require secret rotation +# - Can use different RBAC policies for each + diff --git a/tests/setup-minio-k8s.sh b/tests/setup-minio-k8s.sh new file mode 100755 index 0000000..082488a --- /dev/null +++ b/tests/setup-minio-k8s.sh @@ -0,0 +1,127 @@ +#!/bin/bash +set -e + +# Colors for output +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${BLUE}๐Ÿš€ Setting up MinIO in Kubernetes for testing...${NC}" + +MINIO_NAMESPACE="${NAMESPACE:-mssql-backup-test}" +MINIO_USER="minioadmin" +MINIO_PASSWORD="minioadmin" +BUCKET_NAME="backups" + +echo "" +echo -e "${YELLOW}๐Ÿ“ฆ Creating namespace: $MINIO_NAMESPACE${NC}" +kubectl create namespace $MINIO_NAMESPACE --dry-run=client -o yaml | kubectl apply -f - + +echo "" +echo -e "${YELLOW}๐Ÿ—„๏ธ Deploying MinIO...${NC}" +kubectl apply -f - </dev/null | grep -c "minio" || true) + +if [ "$MINIO_EXISTS" -eq 0 ]; then + echo -e "${YELLOW}๐Ÿ“ฆ MinIO not found. Deploying MinIO in $TEST_NAMESPACE...${NC}" + NAMESPACE=$TEST_NAMESPACE "$(dirname "$0")/setup-minio-k8s.sh" +else + echo -e "${GREEN}โœ… MinIO already running in $TEST_NAMESPACE namespace${NC}" + + # Verify MinIO is ready + echo -e "${YELLOW}โณ Checking MinIO status...${NC}" + kubectl wait --for=condition=ready pod/minio -n $TEST_NAMESPACE --timeout=60s + + # Ensure bucket exists + echo -e "${YELLOW}๐Ÿ“ฆ Ensuring bucket exists: $BUCKET_NAME${NC}" + kubectl exec -n $TEST_NAMESPACE minio -- sh -c " + mc alias set local http://localhost:9000 $MINIO_USER $MINIO_PASSWORD 2>/dev/null && \ + mc mb local/$BUCKET_NAME --ignore-existing 2>/dev/null + " || echo "Bucket already exists or created" +fi + +echo "" +echo -e "${BLUE}======================================${NC}" +echo -e "${BLUE}๐Ÿงช Running MSSQL Backup Test${NC}" +echo -e "${BLUE}======================================${NC}" +echo "" + +# Run the test with MinIO configuration (same namespace) +NAMESPACE="$TEST_NAMESPACE" \ +STATEFULSET_FILE="$(dirname "$0")/k8s-statefulset-test.yaml" \ +S3_ENDPOINT="$MINIO_ENDPOINT" \ +S3_ACCESS_KEY_ID="$MINIO_USER" \ +S3_SECRET_ACCESS_KEY="$MINIO_PASSWORD" \ +S3_BUCKET="$BUCKET_NAME" \ +"$(dirname "$0")/test-mssql-k8s.sh" + +echo "" +echo -e "${GREEN}๐ŸŽ‰ All tests completed successfully!${NC}" +echo "" +echo -e "${BLUE}๐Ÿ“Š View backups in MinIO:${NC}" +echo -e " kubectl exec -n $TEST_NAMESPACE minio -- mc ls local/$BUCKET_NAME/mssql-backups/" +echo "" +echo -e "${BLUE}๐ŸŒ Access MinIO Console:${NC}" +echo -e " kubectl port-forward -n $TEST_NAMESPACE pod/minio 9001:9001" +echo -e " Then open: http://localhost:9001" +echo -e " Login: $MINIO_USER / $MINIO_PASSWORD" +echo "" +echo -e "${BLUE}๐Ÿงน Cleanup (everything in one namespace):${NC}" +echo -e " kubectl delete namespace $TEST_NAMESPACE" +echo "" + diff --git a/tests/test-mssql-k8s.sh b/tests/test-mssql-k8s.sh new file mode 100755 index 0000000..50dd48f --- /dev/null +++ b/tests/test-mssql-k8s.sh @@ -0,0 +1,236 @@ +#!/bin/bash +set -e + +# Colors for output +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' 
+RED='\033[0;31m' +NC='\033[0m' # No Color + +echo -e "${BLUE}๐Ÿš€ Starting Kubernetes MSSQL backup test...${NC}" + +# Configuration +NAMESPACE="${NAMESPACE:-mssql-backup-test}" +STATEFULSET_NAME="mssql" +POD_NAME="mssql-0" +MSSQL_PASSWORD="YourStrong@Passw0rd" +DATABASE_NAME="TestDB" +MSSQL_DATA_DIR="${MSSQL_DATA_DIR:-/var/opt/mssql/data}" +S3_BUCKET="${S3_BUCKET:-test-backups}" +S3_ENDPOINT="${S3_ENDPOINT:-}" # Set this if using MinIO or other S3-compatible storage +STATEFULSET_FILE="${STATEFULSET_FILE:-$(dirname "$0")/k8s-statefulset-with-sidecar.yaml}" + +# Cleanup function +cleanup() { + echo "" + echo -e "${YELLOW}โœจ Cleaning up resources...${NC}" + kubectl delete statefulset $STATEFULSET_NAME -n $NAMESPACE --ignore-not-found=true + kubectl delete pvc -l app=mssql -n $NAMESPACE --ignore-not-found=true + kubectl delete configmap mssql-config -n $NAMESPACE --ignore-not-found=true + kubectl delete secret mssql-general -n $NAMESPACE --ignore-not-found=true + + # Optionally delete the namespace (uncomment to auto-delete) + # kubectl delete namespace $NAMESPACE --ignore-not-found=true + + echo -e "${GREEN}๐ŸŽ‰ Cleanup complete!${NC}" + echo -e "${BLUE}๐Ÿ’ก To delete the namespace (including MinIO if present):${NC}" + echo -e "${BLUE} kubectl delete namespace $NAMESPACE${NC}" +} + +# Trap cleanup on exit +trap cleanup EXIT + +echo "" +echo -e "${YELLOW}๐Ÿ“ฆ Creating namespace: $NAMESPACE${NC}" +kubectl create namespace $NAMESPACE --dry-run=client -o yaml | kubectl apply -f - + +echo "" +echo -e "${YELLOW}๐Ÿงน Cleaning up any existing resources in namespace...${NC}" +kubectl delete statefulset $STATEFULSET_NAME -n $NAMESPACE --ignore-not-found=true +kubectl delete pvc -l app=mssql -n $NAMESPACE --ignore-not-found=true +kubectl delete configmap mssql-config -n $NAMESPACE --ignore-not-found=true +kubectl delete secret mssql-general -n $NAMESPACE --ignore-not-found=true + +# Wait for PVC to be deleted +echo -e "${YELLOW}โณ Waiting for PVC cleanup...${NC}" +while kubectl get pvc -l app=mssql -n $NAMESPACE 2>/dev/null | grep -q mssql; do + echo "Waiting for PVC to be deleted..." 
+ sleep 2 +done + +echo "" +echo -e "${YELLOW}๐Ÿ“ Creating ConfigMap...${NC}" +kubectl create configmap mssql-config -n $NAMESPACE \ + --from-literal=DATABASE_SERVER='mssql' \ + --from-literal=DATABASE_HOST='mssql-service' \ + --from-literal=DATABASE_NAME="$DATABASE_NAME" \ + --from-literal=DATABASE_PORT='1433' \ + --from-literal=MSSQL_DATA_DIR='/var/opt/mssql/data' \ + --from-literal=SCHEDULE='*/5 * * * *' \ + --from-literal=BACKUP_KEEP_DAYS='7' \ + --from-literal=S3_BUCKET="$S3_BUCKET" \ + --from-literal=S3_PREFIX='mssql-backups' \ + --from-literal=S3_REGION='us-east-1' \ + ${S3_ENDPOINT:+--from-literal=S3_ENDPOINT="$S3_ENDPOINT"} \ + ${S3_ENDPOINT:+--from-literal=S3_S3V4='yes'} + +echo "" +echo -e "${YELLOW}๐Ÿ” Creating Secret...${NC}" +kubectl create secret generic mssql-general -n $NAMESPACE \ + --from-literal=MSSQL_SA_PASSWORD="$MSSQL_PASSWORD" \ + --from-literal=DATABASE_USER='sa' \ + --from-literal=DATABASE_PASSWORD="$MSSQL_PASSWORD" \ + --from-literal=S3_ACCESS_KEY_ID="${S3_ACCESS_KEY_ID:-minioadmin}" \ + --from-literal=S3_SECRET_ACCESS_KEY="${S3_SECRET_ACCESS_KEY:-minioadmin}" \ + --from-literal=PASSPHRASE="${PASSPHRASE:-TestEncryptionPassphrase123}" + +echo "" +echo -e "${YELLOW}๐Ÿ“ฆ Deploying StatefulSet from $STATEFULSET_FILE...${NC}" +kubectl apply -f $STATEFULSET_FILE -n $NAMESPACE + +echo "" +echo -e "${YELLOW}โณ Waiting for pod to be ready (this may take 1-2 minutes)...${NC}" +kubectl wait --for=condition=ready pod/$POD_NAME -n $NAMESPACE --timeout=300s + +echo "" +echo -e "${YELLOW}โณ Waiting for MSSQL to be fully initialized...${NC}" +sleep 10 + +# Check if both containers are running +MSSQL_READY=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.containerStatuses[?(@.name=="mssql")].ready}') +BACKUP_READY=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.containerStatuses[?(@.name=="backup")].ready}') + +if [ "$MSSQL_READY" != "true" ] || [ "$BACKUP_READY" != "true" ]; then + echo -e "${RED}โŒ Containers not ready!${NC}" + echo "MSSQL ready: $MSSQL_READY" + echo "Backup ready: $BACKUP_READY" + kubectl describe pod $POD_NAME -n $NAMESPACE + exit 1 +fi + +echo -e "${GREEN}โœ… Pod is ready with both containers running!${NC}" + +echo "" +echo -e "${YELLOW}๐Ÿ—„๏ธ Creating test database...${NC}" +kubectl exec $POD_NAME -c mssql -n $NAMESPACE -- /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P "$MSSQL_PASSWORD" -C \ + -Q "CREATE DATABASE $DATABASE_NAME;" + +echo "" +echo -e "${YELLOW}๐Ÿ“ Creating test table and inserting data...${NC}" +kubectl exec $POD_NAME -c mssql -n $NAMESPACE -- /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d $DATABASE_NAME \ + -Q "CREATE TABLE Users (id INT PRIMARY KEY, name VARCHAR(50)); INSERT INTO Users VALUES (1, 'John'), (2, 'Jane');" + +echo "" +echo -e "${YELLOW}๐Ÿ“Š Current data:${NC}" +kubectl exec $POD_NAME -c mssql -n $NAMESPACE -- /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d $DATABASE_NAME \ + -Q "SELECT * FROM Users;" + +echo "" +echo -e "${YELLOW}๐Ÿ’พ Running backup...${NC}" +kubectl exec $POD_NAME -c backup -n $NAMESPACE -- sh backup.sh + +echo "" +echo -e "${YELLOW}๐Ÿ“‹ Checking backup container logs...${NC}" +kubectl logs $POD_NAME -c backup -n $NAMESPACE --tail=20 + +# Optional: List S3 backups if aws CLI is available in the backup container +echo "" +echo -e "${YELLOW}๐Ÿ“‹ Checking S3 for backups...${NC}" +if [ -n "$S3_ENDPOINT" ]; then + kubectl exec $POD_NAME -c backup -n $NAMESPACE -- aws s3 ls s3://$S3_BUCKET/mssql-backups/ 
--endpoint-url="$S3_ENDPOINT" 2>/dev/null || echo "Note: Could not list S3 bucket (this is OK for local testing)" +else + kubectl exec $POD_NAME -c backup -n $NAMESPACE -- aws s3 ls s3://$S3_BUCKET/mssql-backups/ 2>/dev/null || echo "Note: Could not list S3 bucket (this is OK for local testing)" +fi + +echo "" +echo -e "${YELLOW}๐Ÿ” Verifying backup is encrypted...${NC}" +# Check the backup logs for encryption activity +BACKUP_LOGS=$(kubectl logs $POD_NAME -c backup -n $NAMESPACE --tail=100 2>/dev/null || echo "") +if echo "$BACKUP_LOGS" | grep -q "Encrypting backup"; then + echo -e "${GREEN}โœ… Backup encryption confirmed${NC}" +elif echo "$BACKUP_LOGS" | grep -q "\.bak\.gpg"; then + echo -e "${GREEN}โœ… Backup is encrypted (.gpg extension detected in logs)${NC}" +elif echo "$BACKUP_LOGS" | grep -q "\.dump\.gpg"; then + echo -e "${GREEN}โœ… Backup is encrypted (.gpg extension detected in logs)${NC}" +else + # Final check: was PASSPHRASE set? + PASSPHRASE_SET=$(kubectl exec $POD_NAME -c backup -n $NAMESPACE -- sh -c 'test -n "$PASSPHRASE" && echo "yes" || echo "no"' 2>/dev/null) + if [ "$PASSPHRASE_SET" = "yes" ]; then + echo -e "${YELLOW}โš ๏ธ PASSPHRASE is set, but cannot confirm encryption from logs${NC}" + echo -e "${YELLOW} (Encryption should be active, will verify during restore)${NC}" + else + echo -e "${RED}โŒ Warning: PASSPHRASE not set - backups are NOT encrypted${NC}" + fi +fi + +echo "" +echo -e "${YELLOW}๐Ÿ”จ Modifying database (deleting John)...${NC}" +kubectl exec $POD_NAME -c mssql -n $NAMESPACE -- /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d $DATABASE_NAME \ + -Q "DELETE FROM Users WHERE name = 'John';" + +echo "" +echo -e "${YELLOW}๐Ÿ“Š Current data after modification (should only show Jane):${NC}" +kubectl exec $POD_NAME -c mssql -n $NAMESPACE -- /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d $DATABASE_NAME \ + -Q "SELECT * FROM Users;" + +echo "" +echo -e "${YELLOW}โ™ป๏ธ Restoring from backup...${NC}" +RESTORE_OUTPUT=$(kubectl exec $POD_NAME -c backup -n $NAMESPACE -- sh restore.sh 2>&1) +echo "$RESTORE_OUTPUT" + +# Verify decryption happened during restore +if echo "$RESTORE_OUTPUT" | grep -q "Decrypting backup"; then + echo -e "${GREEN}โœ… Backup was successfully decrypted during restore${NC}" +elif echo "$RESTORE_OUTPUT" | grep -q "encrypted with 1 passphrase"; then + echo -e "${GREEN}โœ… GPG decryption confirmed${NC}" +fi + +echo "" +echo -e "${YELLOW}๐Ÿ“Š Data after restore (should show both John and Jane):${NC}" +kubectl exec $POD_NAME -c mssql -n $NAMESPACE -- /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d $DATABASE_NAME \ + -Q "SELECT * FROM Users;" + +echo "" +echo -e "${YELLOW}๐Ÿ” Verifying restoration...${NC}" +RECORD_COUNT=$(kubectl exec $POD_NAME -c mssql -n $NAMESPACE -- /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d $DATABASE_NAME -h -1 -W \ + -Q "SET NOCOUNT ON; SELECT COUNT(*) FROM Users;" | grep -v '^$' | tr -d '[:space:]') + +if [ "$RECORD_COUNT" = "2" ]; then + echo -e "${GREEN}โœ… Success! Both records were restored correctly.${NC}" +else + echo -e "${RED}โŒ Failed! 
Expected 2 records, found: $RECORD_COUNT${NC}" + exit 1 +fi + +echo "" +echo -e "${YELLOW}๐Ÿ“Š Checking resource usage...${NC}" +kubectl top pod $POD_NAME -n $NAMESPACE --containers 2>/dev/null || echo "Note: Metrics server not available" + +echo "" +echo -e "${GREEN}๐ŸŽ‰ All tests passed!${NC}" +echo "" +echo -e "${BLUE}Additional commands you can try:${NC}" +echo -e " ${YELLOW}# View MSSQL logs:${NC}" +echo -e " kubectl logs $POD_NAME -c mssql -n $NAMESPACE" +echo "" +echo -e " ${YELLOW}# View backup logs:${NC}" +echo -e " kubectl logs $POD_NAME -c backup -n $NAMESPACE" +echo "" +echo -e " ${YELLOW}# Execute manual backup:${NC}" +echo -e " kubectl exec $POD_NAME -c backup -n $NAMESPACE -- sh backup.sh" +echo "" +echo -e " ${YELLOW}# Connect to MSSQL:${NC}" +echo -e " kubectl exec -it $POD_NAME -c mssql -n $NAMESPACE -- /opt/mssql-tools18/bin/sqlcmd -S localhost -U sa -P '$MSSQL_PASSWORD' -C" +echo "" +echo -e " ${YELLOW}# Check disk usage:${NC}" +echo -e " kubectl exec $POD_NAME -c backup -n $NAMESPACE -- df -h /var/opt/mssql/data" +echo "" + diff --git a/tests/test-mssql.sh b/tests/test-mssql.sh new file mode 100755 index 0000000..8a3de27 --- /dev/null +++ b/tests/test-mssql.sh @@ -0,0 +1,88 @@ +#!/bin/bash +set -e # Exit on error (but we'll handle specific commands) + +echo "๐Ÿš€ Starting test of MSSQL backup functionality..." + +echo "๐Ÿงน Cleaning up any existing containers..." +docker compose down -v 2>/dev/null || true + +echo "๐Ÿ“ฆ Starting MinIO and MSSQL..." +docker compose up -d minio mssql + +echo "โณ Waiting for MinIO to be ready..." +for i in {1..10}; do + if docker exec db-backup-s3-minio-1 mc alias set local http://localhost:9000 miniouser minioroot &>/dev/null; then + echo "โœ… MinIO is ready!" + break + fi + echo -n "." + sleep 1 +done +echo "" + +echo "๐Ÿ“ฆ Creating backups bucket..." +docker exec db-backup-s3-minio-1 mc mb local/backups --ignore-existing || true + +echo "โณ Waiting for MSSQL to be ready (this takes about 30 seconds)..." +for i in {1..30}; do + if docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P 'YourStrong@Passw0rd' -C \ + -Q "SELECT 1" &>/dev/null; then + echo "โœ… MSSQL is ready!" + break + fi + echo -n "." + sleep 1 +done +echo "" + +echo "๐Ÿ—„๏ธ Creating test database..." +docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P 'YourStrong@Passw0rd' -C \ + -Q "CREATE DATABASE TestDB;" + +echo "๐Ÿ“ Creating test table and inserting data..." +docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P 'YourStrong@Passw0rd' -C -d TestDB \ + -Q "CREATE TABLE Users (id INT, name VARCHAR(50)); INSERT INTO Users VALUES (1, 'John'), (2, 'Jane');" + +echo "๐Ÿ“Š Current data:" +docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \ + -S localhost -U sa -P 'YourStrong@Passw0rd' -C -d TestDB \ + -Q "SELECT * FROM Users;" + +echo "" +echo "๐Ÿ’พ Running backup..." +docker compose run --rm backup-mssql sh backup.sh + +echo "" +echo "๐Ÿ“‹ Checking MinIO for backup..." +echo "Backups in bucket:" +docker exec db-backup-s3-minio-1 mc ls local/backups/mssql-backups/ + +echo "" +echo "๐Ÿ”จ Modifying database (deleting John)..." 
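+# Delete one of the two seeded rows so the restore step below can be verified
+# to bring the full data set back.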
+docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \
+  -S localhost -U sa -P 'YourStrong@Passw0rd' -C -d TestDB \
+  -Q "DELETE FROM Users WHERE id = 1;"
+
+echo "📊 Current data after modification (should only show Jane):"
+docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \
+  -S localhost -U sa -P 'YourStrong@Passw0rd' -C -d TestDB \
+  -Q "SELECT * FROM Users;"
+
+echo ""
+echo "♻️ Restoring from backup..."
+docker compose run --rm backup-mssql sh restore.sh
+
+echo ""
+echo "📊 Data after restore (should show both John and Jane):"
+docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \
+  -S localhost -U sa -P 'YourStrong@Passw0rd' -C -d TestDB \
+  -Q "SELECT * FROM Users;"
+
+echo ""
+echo "✨ Test complete! Skipping cleanup so the containers can be inspected; run 'docker compose down -v' to remove them."
+# docker compose down -v
+
+echo "🎉 All done!"