Merge pull request #1 from ditkrg/add-mssql

Add mssql to the supported databases
This commit is contained in:
Shakar Bakr 2025-10-27 13:22:53 +03:00 committed by GitHub
commit 7a5b76c06a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
17 changed files with 1472 additions and 26 deletions

View File

@ -1,18 +1,30 @@
# Download go-cron
ARG ALPINE_VERSION=3.21 ARG ALPINE_VERSION=3.21
FROM alpine:${ALPINE_VERSION} FROM alpine:${ALPINE_VERSION}
WORKDIR /
# Install tools for PostgreSQL, MariaDB, and AWS CLI
RUN apk update && \ RUN apk update && \
apk add --no-cache \ apk add --no-cache \
gnupg \ gnupg \
aws-cli \ aws-cli \
postgresql-client \ postgresql-client \
mysql-client mariadb-connector-c mysql-client mariadb-connector-c \
curl
# Install MSSQL tools (sqlcmd) for Microsoft SQL Server on Alpine
# Source: https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-setup-tools
RUN curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_18.1.1.1-1_amd64.apk && \
curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_18.1.1.1-1_amd64.apk && \
apk add --allow-untrusted msodbcsql18_18.1.1.1-1_amd64.apk && \
apk add --allow-untrusted mssql-tools18_18.1.1.1-1_amd64.apk && \
rm msodbcsql18_18.1.1.1-1_amd64.apk mssql-tools18_18.1.1.1-1_amd64.apk
RUN rm -rf /var/cache/apk/* RUN rm -rf /var/cache/apk/*
ENV PATH="${PATH}:/opt/mssql-tools18/bin"
ENV DATABASE_NAME '' ENV DATABASE_NAME ''
ENV DATABASE_HOST '' ENV DATABASE_HOST ''
ENV DATABASE_PORT '' ENV DATABASE_PORT ''
@ -22,6 +34,8 @@ ENV DATABASE_PASSWORD ''
ENV PGDUMP_EXTRA_OPTS '' ENV PGDUMP_EXTRA_OPTS ''
ENV MARIADB_DUMP_EXTRA_OPTS '' ENV MARIADB_DUMP_EXTRA_OPTS ''
ENV MARIADB_EXTRA_OPTS '' ENV MARIADB_EXTRA_OPTS ''
ENV MSSQL_EXTRA_OPTS ''
ENV MSSQL_DATA_DIR '/var/opt/mssql/data'
ENV S3_ACCESS_KEY_ID '' ENV S3_ACCESS_KEY_ID ''
ENV S3_SECRET_ACCESS_KEY '' ENV S3_SECRET_ACCESS_KEY ''
ENV S3_BUCKET '' ENV S3_BUCKET ''

282
README.md
View File

@ -1,6 +1,11 @@
# Introduction # Introduction
This project provides Docker images to periodically back up a database to AWS S3, and to restore from the backup as needed. This project provides Docker images to periodically back up a database to AWS S3, and to restore from the backup as needed.
Supported databases:
- PostgreSQL
- MariaDB/MySQL
- Microsoft SQL Server (MSSQL)
# Usage # Usage
## Backup ## Backup
```yaml ```yaml
@ -26,9 +31,50 @@ services:
DATABASE_NAME: dbname DATABASE_NAME: dbname
DATABASE_USER: user DATABASE_USER: user
DATABASE_PASSWORD: password DATABASE_PASSWORD: password
DATABASE_SERVER: postgres DATABASE_SERVER: postgres # postgres, mariadb, or mssql
``` ```
### MSSQL Example
**Note:** MSSQL backups use `sqlcmd` with the native `BACKUP DATABASE` command, which writes backup files server-side. This requires a shared volume between the MSSQL and backup containers.
```yaml
services:
mssql:
image: mcr.microsoft.com/mssql/server:2022-latest
platform: linux/amd64 # Required for Apple Silicon Macs
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: YourStrong@Passw0rd
MSSQL_PID: Express
volumes:
- mssql-data:/var/opt/mssql # Shared volume
backup:
image: reg.dev.krd/db-backup-s3/db-backup-s3:alpine-3.21
platform: linux/amd64 # Required for Apple Silicon Macs
volumes:
- mssql-data:/var/opt/mssql # Shared volume with MSSQL
environment:
SCHEDULE: '@daily'
S3_REGION: us-east-1
S3_ACCESS_KEY_ID: your_key
S3_SECRET_ACCESS_KEY: your_secret
S3_BUCKET: my-bucket
S3_PREFIX: mssql-backup
DATABASE_HOST: mssql
DATABASE_PORT: 1433
DATABASE_NAME: MyDatabase
DATABASE_USER: sa
DATABASE_PASSWORD: YourStrong@Passw0rd
DATABASE_SERVER: mssql
MSSQL_DATA_DIR: /var/opt/mssql/data # MSSQL data directory (where backups are temporarily stored)
volumes:
mssql-data: # Shared volume for MSSQL data and backups
```
See [`docker-compose.yaml`](./docker-compose.yaml) for a complete working example.
- Images are tagged by the major PostgreSQL version supported: `11`, `12`, `13`, `14`, or `15`. - Images are tagged by the major PostgreSQL version supported: `11`, `12`, `13`, `14`, or `15`.
- The `SCHEDULE` variable determines backup frequency. See go-cron schedules documentation [here](http://godoc.org/github.com/robfig/cron#hdr-Predefined_schedules). Omit to run the backup immediately and then exit. - The `SCHEDULE` variable determines backup frequency. See go-cron schedules documentation [here](http://godoc.org/github.com/robfig/cron#hdr-Predefined_schedules). Omit to run the backup immediately and then exit.
- If `PASSPHRASE` is provided, the backup will be encrypted using GPG. - If `PASSPHRASE` is provided, the backup will be encrypted using GPG.
@ -36,6 +82,217 @@ services:
- If `BACKUP_KEEP_DAYS` is set, backups older than this many days will be deleted from S3. - If `BACKUP_KEEP_DAYS` is set, backups older than this many days will be deleted from S3.
- Set `S3_ENDPOINT` if you're using a non-AWS S3-compatible storage provider. - Set `S3_ENDPOINT` if you're using a non-AWS S3-compatible storage provider.
## Kubernetes Examples
### Standard CronJob (PostgreSQL, MariaDB)
PostgreSQL and MariaDB can use a standard Kubernetes CronJob since they use client-side backup tools (`pg_dump`, `mariadb-dump`) that don't require shared volumes:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: postgres-backup
spec:
schedule: "0 2 * * *" # Daily at 2 AM
jobTemplate:
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: backup
image: reg.dev.krd/db-backup-s3/db-backup-s3:alpine-3.21
env:
- name: DATABASE_SERVER
value: "postgres"
- name: DATABASE_HOST
value: "postgres-service"
- name: DATABASE_PORT
value: "5432"
- name: DATABASE_NAME
valueFrom:
secretKeyRef:
name: db-credentials
key: database
- name: DATABASE_USER
valueFrom:
secretKeyRef:
name: db-credentials
key: username
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: db-credentials
key: password
- name: S3_REGION
value: "us-east-1"
- name: S3_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: s3-credentials
key: access-key-id
- name: S3_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: s3-credentials
key: secret-access-key
- name: S3_BUCKET
value: "my-backups"
- name: S3_PREFIX
value: "postgres-backups"
- name: BACKUP_KEEP_DAYS
value: "7"
```
### MSSQL CronJob Example
> **Note:** For MSSQL StatefulSets with `ReadWriteOnce` volumes, use the [sidecar pattern](#mssql-with-statefulset-sidecar-pattern) instead. This CronJob example only works if you have a `ReadWriteMany` volume or a separate network-accessible MSSQL instance.
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: mssql-backup
spec:
schedule: "0 2 * * *" # Daily at 2 AM
jobTemplate:
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: backup
image: reg.dev.krd/db-backup-s3/db-backup-s3:alpine-3.21
env:
- name: DATABASE_SERVER
value: "mssql"
- name: DATABASE_HOST
value: "mssql-service"
- name: DATABASE_PORT
value: "1433"
- name: DATABASE_NAME
valueFrom:
secretKeyRef:
name: db-credentials
key: database
- name: DATABASE_USER
valueFrom:
secretKeyRef:
name: db-credentials
key: username
- name: DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: db-credentials
key: password
- name: MSSQL_DATA_DIR
value: "/var/opt/mssql/data"
- name: S3_REGION
value: "us-east-1"
- name: S3_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: s3-credentials
key: access-key-id
- name: S3_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: s3-credentials
key: secret-access-key
- name: S3_BUCKET
value: "my-backups"
- name: S3_PREFIX
value: "mssql-backups"
- name: BACKUP_KEEP_DAYS
value: "7"
volumeMounts:
- name: mssql-data
mountPath: /var/opt/mssql/data
volumes:
- name: mssql-data
persistentVolumeClaim:
claimName: mssql-data-pvc # Must be ReadWriteMany for CronJob
```
**Manual Backup Trigger:**
```bash
# Create a one-off job from the CronJob
kubectl create job --from=cronjob/mssql-backup manual-backup-$(date +%Y%m%d-%H%M%S)
```
### MSSQL with StatefulSet (Sidecar Pattern)
For MSSQL StatefulSets with `ReadWriteOnce` volumes, use the **sidecar pattern** instead of a CronJob. This allows the backup container to share the same volume as the database container, which is required for `sqlcmd`'s native `BACKUP DATABASE` command.
**Why Sidecar for MSSQL?**
- `ReadWriteOnce` volumes can only be mounted by one pod at a time
- MSSQL's `BACKUP DATABASE` writes files server-side to `/var/opt/mssql/data`
- A sidecar container in the same pod can access the same volume
- No need for complex volume mounting or client-side backup tools
**Example: StatefulSet with Backup Sidecar**
See [`tests/k8s-statefulset-with-sidecar.yaml`](./tests/k8s-statefulset-with-sidecar.yaml) for a complete example.
```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mssql
spec:
# ... (your existing StatefulSet config)
template:
spec:
containers:
# MSSQL Container
- name: mssql
image: mcr.microsoft.com/mssql/server:2022-CU14-ubuntu-22.04
# ... (your existing MSSQL config)
volumeMounts:
- mountPath: /var/opt/mssql/data
name: data
# Backup Sidecar Container
- name: backup
image: ghcr.io/your-org/db-backup-s3:latest
env:
- name: SCHEDULE
value: "0 2 * * *" # Daily at 2 AM
- name: DATABASE_SERVER
value: "mssql"
- name: DATABASE_HOST
value: "localhost" # Same pod
- name: DATABASE_PORT
value: "1433"
- name: MSSQL_DATA_DIR
value: "/var/opt/mssql/data"
# ... (S3 and DB credentials from secrets)
volumeMounts:
- mountPath: /var/opt/mssql/data
name: data # Shared with MSSQL container
```
**Key Configuration:**
- `DATABASE_HOST: "localhost"` - Both containers are in the same pod
- `MSSQL_DATA_DIR: "/var/opt/mssql/data"` - MSSQL's data directory (where backup files are temporarily stored)
- Both containers mount the same volume at `/var/opt/mssql/data`
- Set `SCHEDULE` env var for automated backups (cron format)
**Trigger Manual Backup:**
```bash
# Execute backup in the sidecar container
kubectl exec -it mssql-0 -c backup -- sh backup.sh
```
**Restore from Backup:**
```bash
# Restore latest backup
kubectl exec -it mssql-0 -c backup -- sh restore.sh
# Restore specific backup by timestamp
kubectl exec -it mssql-0 -c backup -- sh restore.sh 2025-10-22T14:05:00
```
## Restore ## Restore
> **WARNING:** DATA LOSS! All database objects will be dropped and re-created. > **WARNING:** DATA LOSS! All database objects will be dropped and re-created.
### ... from latest backup ### ... from latest backup
@ -61,6 +318,29 @@ cp template.env .env
docker compose up -d docker compose up -d
``` ```
## Test Scripts
### Docker Compose
```sh
# Test MSSQL backup/restore with Docker Compose
./tests/test-mssql.sh
```
### Kubernetes (Recommended - Everything in One Namespace)
```sh
# Complete automated test with local MinIO
# Creates mssql-backup-test namespace with BOTH MinIO and MSSQL
./tests/test-mssql-k8s-with-minio.sh
# Manual test (if you already have S3/MinIO elsewhere)
NAMESPACE=mssql-backup-test S3_ENDPOINT=http://your-s3 ./tests/test-mssql-k8s.sh
# Clean up (removes everything - one command!)
kubectl delete namespace mssql-backup-test
```
**Architecture:** MinIO and MSSQL run in the same namespace for simplified networking and easy cleanup.
# Acknowledgements # Acknowledgements
This project is a fork and re-structuring @eeshugerman's fork of @schickling's [postgres-backup-s3](https://github.com/schickling/dockerfiles/tree/master/postgres-backup-s3) and [postgres-restore-s3](https://github.com/schickling/dockerfiles/tree/master/postgres-restore-s3). This project is a fork and re-structuring @eeshugerman's fork of @schickling's [postgres-backup-s3](https://github.com/schickling/dockerfiles/tree/master/postgres-backup-s3) and [postgres-restore-s3](https://github.com/schickling/dockerfiles/tree/master/postgres-restore-s3).

View File

@ -16,21 +16,35 @@ services:
MYSQL_DATABASE: database MYSQL_DATABASE: database
MYSQL_ROOT_PASSWORD: root_password MYSQL_ROOT_PASSWORD: root_password
mssql:
platform: linux/amd64
image: mcr.microsoft.com/mssql/server:2022-latest
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: YourStrong@Passw0rd
MSSQL_PID: Express
ports:
- "11433:1433" # Use port 11433 externally to avoid conflicts
volumes:
- mssql-data:/var/opt/mssql # Database storage and backup location (shared with backup container)
minio: minio:
image: bitnami/minio image: minio/minio:latest
command: server /data --console-address ":9001"
ports: ports:
- 9000:9000 - 9000:9000
- 9001:9001 - 9001:9001
environment: environment:
MINIO_ROOT_USER: miniouser MINIO_ROOT_USER: miniouser
MINIO_ROOT_PASSWORD: minioroot MINIO_ROOT_PASSWORD: minioroot
MINIO_DEFAULT_BUCKETS: backups volumes:
- minio-data:/data
backup-postgres: backup-postgres:
build: build:
context: . context: .
args: args:
ALPINE_VERSION: '3.21' ALPINE_VERSION: "3.21"
environment: environment:
# SCHEDULE: '@weekly' # optional # SCHEDULE: '@weekly' # optional
BACKUP_KEEP_DAYS: 7 # optional BACKUP_KEEP_DAYS: 7 # optional
@ -52,7 +66,7 @@ services:
build: build:
context: . context: .
args: args:
ALPINE_VERSION: '3.21' ALPINE_VERSION: "3.21"
environment: environment:
# SCHEDULE: '@weekly' # optional # SCHEDULE: '@weekly' # optional
BACKUP_KEEP_DAYS: 7 # optional BACKUP_KEEP_DAYS: 7 # optional
@ -69,3 +83,32 @@ services:
DATABASE_PORT: 3306 DATABASE_PORT: 3306
DATABASE_SERVER: mysql DATABASE_SERVER: mysql
DATABASE_PASSWORD: root_password DATABASE_PASSWORD: root_password
backup-mssql:
platform: linux/amd64
build:
context: .
args:
ALPINE_VERSION: "3.21"
volumes:
- mssql-data:/var/opt/mssql # Shared volume for native BACKUP DATABASE command
environment:
# SCHEDULE: '@weekly' # optional
BACKUP_KEEP_DAYS: 7 # optional
# PASSPHRASE: passphrase # optional - uncomment to enable GPG encryption
# S3_REGION:
S3_ENDPOINT: http://minio:9000
S3_ACCESS_KEY_ID: miniouser
S3_SECRET_ACCESS_KEY: minioroot
S3_BUCKET: backups
S3_PREFIX: mssql-backups
DATABASE_HOST: mssql
DATABASE_NAME: TestDB
DATABASE_USER: sa
DATABASE_PORT: 1433
DATABASE_SERVER: mssql
DATABASE_PASSWORD: YourStrong@Passw0rd
MSSQL_DATA_DIR: /var/opt/mssql/data # Path where MSSQL backups are stored (must match volume mount)
volumes:
mssql-data: # MSSQL database storage
minio-data: # MinIO object storage

View File

@ -10,16 +10,23 @@ echo "Creating backup of $DATABASE_NAME database..."
backup backup
timestamp=$(date +"%Y-%m-%dT%H:%M:%S") timestamp=$(date +"%Y-%m-%dT%H:%M:%S")
s3_uri_base="s3://${S3_BUCKET}/${S3_PREFIX}/${DATABASE_NAME}_${timestamp}.dump"
# MSSQL uses .bak extension, other databases use .dump
if [ "$DATABASE_SERVER" = "mssql" ]; then
local_file="${MSSQL_DATA_DIR}/db.bak"
s3_uri_base="s3://${S3_BUCKET}/${S3_PREFIX}/${DATABASE_NAME}_${timestamp}.bak"
else
local_file="db.dump"
s3_uri_base="s3://${S3_BUCKET}/${S3_PREFIX}/${DATABASE_NAME}_${timestamp}.dump"
fi
if [ -n "$PASSPHRASE" ]; then if [ -n "$PASSPHRASE" ]; then
echo "Encrypting backup..." echo "Encrypting backup..."
gpg --symmetric --batch --passphrase "$PASSPHRASE" db.dump gpg --symmetric --batch --passphrase "$PASSPHRASE" "$local_file"
rm db.dump rm "$local_file"
local_file="db.dump.gpg" local_file="${local_file}.gpg"
s3_uri="${s3_uri_base}.gpg" s3_uri="${s3_uri_base}.gpg"
else else
local_file="db.dump"
s3_uri="$s3_uri_base" s3_uri="$s3_uri_base"
fi fi

View File

@ -4,7 +4,7 @@ if [ -z "$S3_BUCKET" ]; then
fi fi
if [ -z "$DATABASE_SERVER" ]; then if [ -z "$DATABASE_SERVER" ]; then
echo "You need to set the DATABASE_SERVER environment variable. (postgres, mariadb)" echo "You need to set the DATABASE_SERVER environment variable. (postgres, mariadb, mssql)"
exit 1 exit 1
fi fi

View File

@ -6,6 +6,8 @@ backup() {
backup_postgres backup_postgres
elif [[ "$DATABASE_SERVER" == "mariadb" ]]; then elif [[ "$DATABASE_SERVER" == "mariadb" ]]; then
backup_mariadb backup_mariadb
elif [[ "$DATABASE_SERVER" == "mssql" ]]; then
backup_mssql
else else
echo "Unknown database server: $DATABASE_SERVER" echo "Unknown database server: $DATABASE_SERVER"
exit 1 exit 1
@ -17,6 +19,8 @@ restore() {
restore_postgres restore_postgres
elif [[ "$DATABASE_SERVER" == "mariadb" ]]; then elif [[ "$DATABASE_SERVER" == "mariadb" ]]; then
restore_mariadb restore_mariadb
elif [[ "$DATABASE_SERVER" == "mssql" ]]; then
restore_mssql
else else
echo "Unknown database server: $DATABASE_SERVER" echo "Unknown database server: $DATABASE_SERVER"
exit 1 exit 1
@ -55,3 +59,40 @@ restore_mariadb() {
--password="$DATABASE_PASSWORD" "$MARIADB_EXTRA_OPTS" \ --password="$DATABASE_PASSWORD" "$MARIADB_EXTRA_OPTS" \
$DATABASE_NAME < db.dump $DATABASE_NAME < db.dump
} }
backup_mssql() {
    # Back up the database using the server-side BACKUP DATABASE command.
    # Requires a volume shared with the MSSQL server mounted at MSSQL_DATA_DIR,
    # because the SQL Server process writes the .bak file to its own filesystem.
    # -C trusts the server certificate (self-signed default in the container).
    # -b makes sqlcmd exit non-zero on a SQL error, so a failed backup aborts
    # the calling script instead of silently uploading a stale/missing .bak.
    # Host/port/user are quoted to survive word splitting; $MSSQL_EXTRA_OPTS is
    # intentionally unquoted so it can carry multiple extra sqlcmd options.
    sqlcmd -S "${DATABASE_HOST},${DATABASE_PORT}" \
        -U "${DATABASE_USER}" \
        -P "${DATABASE_PASSWORD}" \
        -C -b \
        -Q "BACKUP DATABASE [${DATABASE_NAME}] TO DISK = N'${MSSQL_DATA_DIR}/db.bak' WITH INIT;" \
        $MSSQL_EXTRA_OPTS
}
# Restore the database from ${MSSQL_DATA_DIR}/db.bak using the server-side
# RESTORE DATABASE command. Requires the same shared volume as backup_mssql
# so sqlcmd can reference a path visible to the SQL Server process.
# NOTE(review): relies on sqlcmd's tabular output format for FILELISTONLY
# (header + separator, then data rows) — confirm against the sqlcmd version
# shipped in the image if this ever mis-parses.
restore_mssql() {
echo "Restoring from backup..."
# Get logical file names from the backup
# -W trims trailing whitespace; SET NOCOUNT ON suppresses the "(N rows
# affected)" footer. `tail -n +3` drops the column-header and separator
# lines, leaving only data rows; awk keeps column 1 (the LogicalName).
# assumes logical file names contain no spaces — TODO confirm
logical_files=$(sqlcmd -S ${DATABASE_HOST},${DATABASE_PORT} \
-U ${DATABASE_USER} \
-P "${DATABASE_PASSWORD}" \
-C -W \
-Q "SET NOCOUNT ON; RESTORE FILELISTONLY FROM DISK = N'${MSSQL_DATA_DIR}/db.bak';" \
| grep -v '^$' | awk '{print $1}' | tail -n +3)
# Parse logical names (first two lines after headers)
# assumes exactly one data file and one log file in the backup — TODO confirm
data_file=$(echo "$logical_files" | sed -n '1p')
log_file=$(echo "$logical_files" | sed -n '2p')
# Restore database with MOVE options
# WITH REPLACE overwrites the existing database; MOVE relocates the data and
# log files to deterministic paths under MSSQL_DATA_DIR.
sqlcmd -S ${DATABASE_HOST},${DATABASE_PORT} \
-U ${DATABASE_USER} \
-P "${DATABASE_PASSWORD}" \
-C \
-Q "RESTORE DATABASE [${DATABASE_NAME}] FROM DISK = N'${MSSQL_DATA_DIR}/db.bak' WITH REPLACE, MOVE N'${data_file}' TO N'${MSSQL_DATA_DIR}/${DATABASE_NAME}.mdf', MOVE N'${log_file}' TO N'${MSSQL_DATA_DIR}/${DATABASE_NAME}_log.ldf';" \
$MSSQL_EXTRA_OPTS
# Clean up backup file
# NOTE(review): runs even if the RESTORE above failed, deleting the only
# local copy of the backup — consider guarding on sqlcmd's exit status.
rm "${MSSQL_DATA_DIR}/db.bak"
}

View File

@ -8,10 +8,21 @@ source ./helpers.sh
s3_uri_base="s3://${S3_BUCKET}/${S3_PREFIX}" s3_uri_base="s3://${S3_BUCKET}/${S3_PREFIX}"
if [ -z "$PASSPHRASE" ]; then # MSSQL uses .bak extension, other databases use .dump
file_type=".dump" if [ "$DATABASE_SERVER" = "mssql" ]; then
backup_file="${MSSQL_DATA_DIR}/db.bak"
if [ -z "$PASSPHRASE" ]; then
file_type=".bak"
else
file_type=".bak.gpg"
fi
else else
file_type=".dump.gpg" backup_file="db.dump"
if [ -z "$PASSPHRASE" ]; then
file_type=".dump"
else
file_type=".dump.gpg"
fi
fi fi
if [ $# -eq 1 ]; then if [ $# -eq 1 ]; then
@ -28,16 +39,22 @@ else
fi fi
echo "Fetching backup from S3..." echo "Fetching backup from S3..."
aws $aws_args s3 cp "${s3_uri_base}/${key_suffix}" "db${file_type}"
if [ -n "$PASSPHRASE" ]; then if [ -n "$PASSPHRASE" ]; then
aws $aws_args s3 cp "${s3_uri_base}/${key_suffix}" "${backup_file}.gpg"
echo "Decrypting backup..." echo "Decrypting backup..."
gpg --decrypt --batch --passphrase "$PASSPHRASE" db.dump.gpg > db.dump gpg --decrypt --batch --passphrase "$PASSPHRASE" "${backup_file}.gpg" > "${backup_file}"
rm db.dump.gpg rm "${backup_file}.gpg"
else
aws $aws_args s3 cp "${s3_uri_base}/${key_suffix}" "${backup_file}"
fi fi
echo "Restoring from backup..." echo "Restoring from backup..."
restore restore
rm db.dump
# Clean up backup file
# Note: For MSSQL, the file is in MSSQL_DATA_DIR and cleanup happens in restore_mssql()
if [ "$DATABASE_SERVER" != "mssql" ]; then
rm "${backup_file}"
fi
echo "Restore complete." echo "Restore complete."

View File

@ -11,8 +11,22 @@ fi
if [ -z "$SCHEDULE" ]; then if [ -z "$SCHEDULE" ]; then
sh backup.sh sh backup.sh
else else
# Use crond from busybox which is available in Alpine # For non-root users, use a writable directory for crontabs
echo "$SCHEDULE /bin/sh $(pwd)/backup.sh" > /etc/crontabs/root # busybox crond supports -c option to specify crontab directory
# Start crond in foreground mode CRON_USER=$(id -u)
exec crond -f -d 8 CRON_DIR="${HOME}/crontabs"
# Create crontab directory
mkdir -p "$CRON_DIR"
# Write crontab entry
echo "$SCHEDULE /bin/sh $(pwd)/backup.sh" > "$CRON_DIR/$CRON_USER"
chmod 600 "$CRON_DIR/$CRON_USER"
echo "Backup schedule configured: $SCHEDULE"
echo "Crontab file: $CRON_DIR/$CRON_USER"
echo "Starting crond..."
# Start crond in foreground mode with custom crontab directory
exec crond -f -d 8 -c "$CRON_DIR"
fi fi

56
tests/README.md Normal file
View File

@ -0,0 +1,56 @@
# Test Scripts
This directory contains automated test scripts for validating the backup and restore functionality across different environments.
## Files
### Docker Compose Tests
- **`test-mssql.sh`**: Tests MSSQL backup/restore using Docker Compose with local MinIO
### Kubernetes Tests
- **`test-mssql-k8s.sh`**: Tests MSSQL StatefulSet with backup sidecar (requires existing S3/MinIO)
- **`test-mssql-k8s-with-minio.sh`**: Complete automated test that deploys MinIO alongside MSSQL
- **`setup-minio-k8s.sh`**: Helper script to deploy MinIO in Kubernetes
### Kubernetes Configuration Examples
- **`k8s-statefulset-with-sidecar.yaml`**: Production-ready MSSQL StatefulSet with backup sidecar
- **`k8s-statefulset-test.yaml`**: Test StatefulSet configuration used by automated test scripts
- **`k8s-mssql-configmap-example.yaml`**: Example ConfigMap for non-sensitive configuration
- **`k8s-mssql-secret-example.yaml`**: Example Secret for sensitive credentials
## Usage
### Quick Start (Recommended)
Run the complete automated test with MinIO:
```bash
./tests/test-mssql-k8s-with-minio.sh
```
This will:
- Create a test namespace (`mssql-backup-test`)
- Deploy MinIO
- Deploy MSSQL StatefulSet with backup sidecar
- Run backup and restore tests
- Verify encryption is working
### Manual Kubernetes Test
If you have an existing S3 endpoint:
```bash
NAMESPACE=mssql-backup-test \
S3_ENDPOINT=http://your-s3:9000 \
S3_ACCESS_KEY_ID=your-key \
S3_SECRET_ACCESS_KEY=your-secret \
./tests/test-mssql-k8s.sh
```
### Docker Compose Test
```bash
./tests/test-mssql.sh
```
## Cleanup
Delete the test namespace to remove all resources:
```bash
kubectl delete namespace mssql-backup-test
```

View File

@ -0,0 +1,98 @@
# Example mssql-config ConfigMap
# This ConfigMap contains non-sensitive configuration for MSSQL and backup sidecar
#
# Usage:
# 1. Copy this file and update with your actual values
# 2. Apply: kubectl apply -f tests/k8s-mssql-configmap.yaml
# 3. Create secret: kubectl apply -f tests/k8s-mssql-secret.yaml
# 4. Deploy: kubectl apply -f tests/k8s-statefulset-with-sidecar.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: mssql-config
namespace: default # Update with your namespace
data:
# ============================================
# Database Configuration
# ============================================
DATABASE_SERVER: "mssql"
DATABASE_HOST: "mssql-service" # MSSQL hostname (overridden to "localhost" in sidecar pattern)
DATABASE_NAME: "MyDatabase" # The database to backup
DATABASE_PORT: "1433"
# ============================================
# MSSQL Specific Configuration
# ============================================
MSSQL_DATA_DIR: "/var/opt/mssql/data"
# MSSQL_EXTRA_OPTS: "" # Additional sqlcmd options if needed
# ============================================
# Backup Schedule and Retention
# ============================================
SCHEDULE: "0 2 * * *" # Daily at 2 AM (cron format)
BACKUP_KEEP_DAYS: "7" # Keep backups for 7 days
# Cron schedule examples:
# "@daily" - Once per day at midnight
# "@weekly" - Once per week at midnight Sunday
# "@hourly" - Once per hour
# "0 */6 * * *" - Every 6 hours
# "0 2 * * *" - Every day at 2 AM
# "0 3 * * 0" - Every Sunday at 3 AM
# "0 0 1 * *" - First day of every month at midnight
# ============================================
# AWS S3 Configuration
# ============================================
S3_BUCKET: "my-database-backups"
S3_PREFIX: "mssql-backups"
S3_REGION: "us-east-1"
# ============================================
# Optional: S3-Compatible Storage (MinIO, Wasabi, etc.)
# ============================================
# Configure if using a non-AWS S3-compatible provider; remove S3_ENDPOINT to use AWS S3
S3_ENDPOINT: "https://s3.example.com" # Your S3-compatible endpoint
S3_S3V4: "yes" # Use Signature Version 4
# Common S3-compatible endpoints:
# MinIO: "https://minio.example.com"
# Wasabi: "https://s3.wasabisys.com"
# DigitalOcean: "https://nyc3.digitaloceanspaces.com"
# Backblaze B2: "https://s3.us-west-001.backblazeb2.com"
---
# Notes:
#
# 1. ConfigMap vs Secret:
# - ConfigMap: Non-sensitive configuration (endpoints, names, schedules)
# - Secret: Sensitive data (passwords, access keys)
#
# 2. To update ConfigMap after deployment:
# kubectl apply -f k8s-mssql-configmap.yaml
# kubectl rollout restart statefulset/mssql
#
# 3. To view the ConfigMap:
# kubectl get configmap mssql-config -o yaml
#
# 4. DATABASE_HOST behavior:
# - In sidecar pattern: Set to "localhost" (overridden in StatefulSet)
# - In CronJob pattern: Use service name like "mssql-service"
# The ConfigMap default is for CronJob; sidecar overrides it in the pod spec.
#
# 5. Alternative: Create from command line:
# kubectl create configmap mssql-config \
# --from-literal=DATABASE_SERVER='mssql' \
# --from-literal=DATABASE_HOST='mssql-service' \
# --from-literal=DATABASE_NAME='MyDatabase' \
# --from-literal=DATABASE_PORT='1433' \
# --from-literal=MSSQL_DATA_DIR='/var/opt/mssql/data' \
# --from-literal=SCHEDULE='0 2 * * *' \
# --from-literal=BACKUP_KEEP_DAYS='7' \
# --from-literal=S3_BUCKET='my-backups' \
# --from-literal=S3_PREFIX='mssql-backups' \
# --from-literal=S3_REGION='us-east-1' \
# --from-literal=S3_ENDPOINT='https://s3.example.com' \
# --from-literal=S3_S3V4='yes'

View File

@ -0,0 +1,72 @@
# Example mssql-general Secret
# This secret contains ONLY sensitive credentials (passwords, keys)
# Non-sensitive configuration is in the ConfigMap (tests/k8s-mssql-configmap-example.yaml)
#
# Usage:
# 1. Copy this file and update with your actual values
# 2. Apply ConfigMap: kubectl apply -f tests/k8s-mssql-configmap.yaml
# 3. Apply Secret: kubectl apply -f tests/k8s-mssql-secret.yaml
# 4. Deploy: kubectl apply -f tests/k8s-statefulset-with-sidecar.yaml
apiVersion: v1
kind: Secret
metadata:
name: mssql-general
namespace: default # Update with your namespace
type: Opaque
stringData:
# ============================================
# MSSQL Server Credentials
# ============================================
MSSQL_SA_PASSWORD: "YourStrong@Passw0rd" # Must meet SQL Server complexity requirements
# ============================================
# Database Backup Credentials
# ============================================
DATABASE_USER: "sa"
DATABASE_PASSWORD: "YourStrong@Passw0rd" # Same as MSSQL_SA_PASSWORD
# ============================================
# AWS S3 Credentials
# ============================================
S3_ACCESS_KEY_ID: "AKIAIOSFODNN7EXAMPLE"
S3_SECRET_ACCESS_KEY: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
# ============================================
# Optional: GPG Encryption Passphrase
# ============================================
# Uncomment to enable encryption of backup files before upload
# PASSPHRASE: "my-super-secret-gpg-passphrase"
---
# Notes:
#
# 1. Password Requirements:
# - MSSQL_SA_PASSWORD must meet SQL Server complexity requirements:
# * At least 8 characters
# * Mix of uppercase, lowercase, digits, and symbols
# * Example: "MyP@ssw0rd123"
#
# 2. For production, consider using:
# - SealedSecrets: https://github.com/bitnami-labs/sealed-secrets
# - External Secrets Operator: https://external-secrets.io/
# - AWS Secrets Manager, Azure Key Vault, or HashiCorp Vault
#
# 3. Non-sensitive configuration (bucket, endpoints, schedule):
# These are now in the ConfigMap (tests/k8s-mssql-configmap-example.yaml)
# This keeps secrets clean and allows easier configuration changes
#
# 4. Alternative: Create secret from command line:
# kubectl create secret generic mssql-general \
# --from-literal=MSSQL_SA_PASSWORD='YourStrong@Passw0rd' \
# --from-literal=DATABASE_USER='sa' \
# --from-literal=DATABASE_PASSWORD='YourStrong@Passw0rd' \
# --from-literal=S3_ACCESS_KEY_ID='YOUR_KEY' \
# --from-literal=S3_SECRET_ACCESS_KEY='YOUR_SECRET'
#
# 5. To view the secret (base64 encoded):
# kubectl get secret mssql-general -o yaml
#
# 6. To decode a specific key:
# kubectl get secret mssql-general -o jsonpath='{.data.DATABASE_PASSWORD}' | base64 -d

View File

@ -0,0 +1,138 @@
# MSSQL StatefulSet with Backup Sidecar
#
# This configuration runs a backup container as a sidecar alongside MSSQL Server.
# Both containers share the same volume, allowing the backup container to access
# MSSQL's native backup files.
#
# Prerequisites:
# 1. Create the ConfigMap: kubectl apply -f tests/k8s-mssql-configmap-example.yaml
# 2. Create the Secret: kubectl apply -f tests/k8s-mssql-secret-example.yaml
# 3. Apply this StatefulSet: kubectl apply -f tests/k8s-statefulset-test.yaml
#
# The backup container will automatically run backups according to the SCHEDULE.
#
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mssql
spec:
replicas: 1
selector:
matchLabels:
app: mssql
serviceName: mssql
template:
metadata:
labels:
app: mssql
spec:
containers:
# MSSQL Server Container
- name: mssql
image: mcr.microsoft.com/mssql/server:2022-CU14-ubuntu-22.04
ports:
- containerPort: 1433
env:
- name: ACCEPT_EULA
value: "Y"
- name: MSSQL_PID
value: Express
envFrom:
- secretRef:
name: mssql-general
resources:
limits:
memory: 4Gi
requests:
cpu: 100m
memory: 4Gi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
volumeMounts:
- mountPath: /var/opt/mssql/data
name: data
# Backup Sidecar Container
- name: backup
image: reg.dev.krd/db-backup-s3/db-backup-s3:test
imagePullPolicy: Always # Update with your image
# Load configuration from ConfigMap and Secret
envFrom:
- configMapRef:
name: mssql-config # Non-sensitive config (schedule, bucket, endpoint)
- secretRef:
name: mssql-general # Sensitive credentials (passwords, keys)
# Override specific values after loading from ConfigMap/Secret
env:
# Override DATABASE_HOST from ConfigMap since we're in the same pod
- name: DATABASE_HOST
value: "localhost" # Sidecar uses localhost; ConfigMap default is for CronJob pattern
# Set HOME to writable location for AWS CLI
- name: HOME
value: "/tmp"
resources:
limits:
memory: 512Mi
requests:
cpu: 100m
memory: 256Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false # Needs write access for temp backup files
volumeMounts:
- mountPath: /var/opt/mssql/data
name: data
securityContext:
fsGroup: 10001
runAsGroup: 10001
runAsNonRoot: true
runAsUser: 10001
seccompProfile:
type: RuntimeDefault
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 4Gi
---
# Configuration Structure
#
# This StatefulSet uses a ConfigMap for non-sensitive config and a Secret for credentials:
#
# ConfigMap (mssql-config) - See tests/k8s-mssql-configmap-example.yaml
# - DATABASE_SERVER, DATABASE_HOST, DATABASE_NAME, DATABASE_PORT
# - MSSQL_DATA_DIR, MSSQL_EXTRA_OPTS
# - SCHEDULE, BACKUP_KEEP_DAYS
# - S3_BUCKET, S3_PREFIX, S3_REGION, S3_ENDPOINT, S3_S3V4
# Note: DATABASE_HOST is overridden to "localhost" in the StatefulSet for sidecar pattern
#
# Secret (mssql-general) - See tests/k8s-mssql-secret-example.yaml
# - MSSQL_SA_PASSWORD
# - DATABASE_USER, DATABASE_PASSWORD
# - S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY
# - PASSPHRASE (optional, for GPG encryption)
#
# Benefits of separating ConfigMap and Secret:
# - Easier to update non-sensitive configuration
# - Better security practices (minimal secret exposure)
# - ConfigMap changes don't require secret rotation
# - Can use different RBAC policies for each

View File

@ -0,0 +1,137 @@
# MSSQL StatefulSet with Backup Sidecar
#
# This configuration runs a backup container as a sidecar alongside MSSQL Server.
# Both containers share the same volume, allowing the backup container to access
# MSSQL's native backup files.
#
# Prerequisites:
# 1. Create the ConfigMap: kubectl apply -f tests/k8s-mssql-configmap-example.yaml
# 2. Create the Secret: kubectl apply -f tests/k8s-mssql-secret-example.yaml
# 3. Apply this StatefulSet: kubectl apply -f tests/k8s-statefulset-with-sidecar.yaml
#
# The backup container will automatically run backups according to the SCHEDULE.
#
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mssql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mssql
  serviceName: mssql
  template:
    metadata:
      labels:
        app: mssql
    spec:
      containers:
        # MSSQL Server Container
        - name: mssql
          image: mcr.microsoft.com/mssql/server:2022-CU14-ubuntu-22.04
          ports:
            - containerPort: 1433
          env:
            - name: ACCEPT_EULA
              value: "Y"
            - name: MSSQL_PID
              value: "Express"
          # MSSQL_SA_PASSWORD comes from the shared secret
          envFrom:
            - secretRef:
                name: mssql-general
          resources:
            limits:
              memory: 4Gi
            requests:
              cpu: 100m
              memory: 4Gi
          securityContext:
            allowPrivilegeEscalation: false
            # NET_BIND_SERVICE is deliberately NOT added: SQL Server listens
            # on 1433 (an unprivileged port, > 1024), so the capability grants
            # nothing and would only widen the attack surface.
            capabilities:
              drop:
                - ALL
          volumeMounts:
            - mountPath: /var/opt/mssql/data
              name: data
        # Backup Sidecar Container
        - name: backup
          image: ghcr.io/your-org/db-backup-s3:latest # Update with your image
          # Load configuration from ConfigMap and Secret
          envFrom:
            - configMapRef:
                name: mssql-config # Non-sensitive config (schedule, bucket, endpoint)
            - secretRef:
                name: mssql-general # Sensitive credentials (passwords, keys)
          # Override specific values after loading from ConfigMap/Secret
          env:
            # Override DATABASE_HOST from ConfigMap since we're in the same pod
            - name: DATABASE_HOST
              value: "localhost" # Sidecar uses localhost; ConfigMap default is for CronJob pattern
            # Set HOME to writable location for AWS CLI
            - name: HOME
              value: "/tmp"
          resources:
            limits:
              memory: 512Mi
            requests:
              cpu: 100m
              memory: 256Mi
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: false # Needs write access for temp backup files
          volumeMounts:
            - mountPath: /var/opt/mssql/data
              name: data
      # Pod-level identity: both containers run as the unprivileged mssql
      # user/group (10001); fsGroup makes the shared PVC group-writable.
      securityContext:
        fsGroup: 10001
        runAsGroup: 10001
        runAsNonRoot: true
        runAsUser: 10001
        seccompProfile:
          type: RuntimeDefault
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 4Gi
---
# Configuration Structure
#
# This StatefulSet uses a ConfigMap for non-sensitive config and a Secret for credentials:
#
# ConfigMap (mssql-config) - See tests/k8s-mssql-configmap-example.yaml
#   - DATABASE_SERVER, DATABASE_HOST, DATABASE_NAME, DATABASE_PORT
#   - MSSQL_DATA_DIR, MSSQL_EXTRA_OPTS
#   - SCHEDULE, BACKUP_KEEP_DAYS
#   - S3_BUCKET, S3_PREFIX, S3_REGION, S3_ENDPOINT, S3_S3V4
#   Note: DATABASE_HOST is overridden to "localhost" in the StatefulSet for sidecar pattern
#
# Secret (mssql-general) - See tests/k8s-mssql-secret-example.yaml
#   - MSSQL_SA_PASSWORD
#   - DATABASE_USER, DATABASE_PASSWORD
#   - S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY
#   - PASSPHRASE (optional, for GPG encryption)
#
# Benefits of separating ConfigMap and Secret:
# - Easier to update non-sensitive configuration
# - Better security practices (minimal secret exposure)
# - ConfigMap changes don't require secret rotation
# - Can use different RBAC policies for each

127
tests/setup-minio-k8s.sh Executable file
View File

@ -0,0 +1,127 @@
#!/bin/bash
# setup-minio-k8s.sh — deploy a single-pod MinIO server into a test namespace
# for exercising the MSSQL S3 backup flow.
#
# Steps: create namespace, deploy MinIO pod + ClusterIP service, wait for
# readiness, create the "backups" bucket via the bundled `mc` client.
#
# Environment:
#   NAMESPACE  target namespace (default: mssql-backup-test)
#
# Fail fast on errors, unset variables, and failures anywhere in a pipeline.
set -euo pipefail

# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${BLUE}🚀 Setting up MinIO in Kubernetes for testing...${NC}"

MINIO_NAMESPACE="${NAMESPACE:-mssql-backup-test}"
MINIO_USER="minioadmin"
MINIO_PASSWORD="minioadmin"
BUCKET_NAME="backups"

echo ""
echo -e "${YELLOW}📦 Creating namespace: $MINIO_NAMESPACE${NC}"
# Idempotent create: render with --dry-run then apply.
kubectl create namespace "$MINIO_NAMESPACE" --dry-run=client -o yaml | kubectl apply -f -

echo ""
echo -e "${YELLOW}🗄️  Deploying MinIO...${NC}"
# The heredoc is unquoted on purpose so $MINIO_NAMESPACE/$MINIO_USER/
# $MINIO_PASSWORD are substituted into the manifest before kubectl sees it.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: minio
  namespace: $MINIO_NAMESPACE
  labels:
    app: minio
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
    seccompProfile:
      type: RuntimeDefault
  containers:
    - name: minio
      image: minio/minio:latest
      args:
        - server
        - /data
        - --console-address
        - :9001
      ports:
        - containerPort: 9000
          name: api
        - containerPort: 9001
          name: console
      env:
        - name: MINIO_ROOT_USER
          value: "$MINIO_USER"
        - name: MINIO_ROOT_PASSWORD
          value: "$MINIO_PASSWORD"
      securityContext:
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL
        readOnlyRootFilesystem: false
      volumeMounts:
        - name: data
          mountPath: /data
  volumes:
    - name: data
      emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: minio
  namespace: $MINIO_NAMESPACE
spec:
  type: ClusterIP
  ports:
    - port: 9000
      targetPort: 9000
      name: api
    - port: 9001
      targetPort: 9001
      name: console
  selector:
    app: minio
EOF

echo ""
echo -e "${YELLOW}⏳ Waiting for MinIO to be ready...${NC}"
kubectl wait --for=condition=ready pod/minio -n "$MINIO_NAMESPACE" --timeout=120s

echo ""
echo -e "${YELLOW}⏳ Waiting for MinIO to start (5 seconds)...${NC}"
# Readiness can race the API actually accepting requests; give it a moment.
sleep 5

echo ""
echo -e "${YELLOW}📦 Creating bucket: $BUCKET_NAME${NC}"
kubectl exec -n "$MINIO_NAMESPACE" minio -- sh -c "
  mc alias set local http://localhost:9000 $MINIO_USER $MINIO_PASSWORD && \
  mc mb local/$BUCKET_NAME --ignore-existing && \
  mc ls local/
"

echo ""
echo -e "${GREEN}✅ MinIO is ready!${NC}"
echo ""
echo -e "${BLUE}MinIO Details:${NC}"
echo -e "  Namespace:     $MINIO_NAMESPACE"
echo -e "  Service:       minio:9000 (within namespace)"
echo -e "  Full Service:  minio.$MINIO_NAMESPACE.svc.cluster.local:9000"
echo -e "  Access Key:    $MINIO_USER"
echo -e "  Secret Key:    $MINIO_PASSWORD"
echo -e "  Bucket:        $BUCKET_NAME"
echo ""
echo -e "${BLUE}To access MinIO Console:${NC}"
echo -e "  kubectl port-forward -n $MINIO_NAMESPACE pod/minio 9001:9001"
echo -e "  Then open: http://localhost:9001"
echo ""
echo -e "${BLUE}To run the MSSQL backup test (same namespace):${NC}"
echo -e "  NAMESPACE=$MINIO_NAMESPACE \\"
echo -e "  S3_ENDPOINT=http://minio:9000 \\"
echo -e "  S3_ACCESS_KEY_ID=$MINIO_USER \\"
echo -e "  S3_SECRET_ACCESS_KEY=$MINIO_PASSWORD \\"
echo -e "  S3_BUCKET=$BUCKET_NAME \\"
echo -e "  ./test-mssql-k8s.sh"
echo ""

View File

@ -0,0 +1,78 @@
#!/bin/bash
# Complete MSSQL backup test orchestrator: ensures MinIO is running in the
# test namespace (deploying it via setup-minio-k8s.sh if absent), then runs
# test-mssql-k8s.sh against it with matching S3 credentials.
#
# Environment:
#   NAMESPACE  shared namespace for MinIO + MSSQL (default: mssql-backup-test)
#
# Fail fast on errors, unset variables, and pipeline failures.
set -euo pipefail

# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

echo -e "${BLUE}🚀 Complete MSSQL Backup Test with MinIO${NC}"
echo -e "${BLUE}=========================================${NC}"
echo ""

# Use same namespace for everything
TEST_NAMESPACE="${NAMESPACE:-mssql-backup-test}"
MINIO_USER="minioadmin"
MINIO_PASSWORD="minioadmin"
BUCKET_NAME="backups"
MINIO_ENDPOINT="http://minio:9000" # Simple service name (same namespace)

echo -e "${GREEN}📦 Test Configuration:${NC}"
echo -e "  Namespace: ${YELLOW}$TEST_NAMESPACE${NC}"
echo -e "  Resources: MinIO + MSSQL (both in same namespace)"
echo -e "  S3 Endpoint: $MINIO_ENDPOINT"
echo ""

# Check if MinIO is already running.
# `|| true` keeps set -e from aborting when the pod does not exist.
MINIO_EXISTS=$(kubectl get pod minio -n "$TEST_NAMESPACE" 2>/dev/null | grep -c "minio" || true)

if [ "$MINIO_EXISTS" -eq 0 ]; then
    echo -e "${YELLOW}📦 MinIO not found. Deploying MinIO in $TEST_NAMESPACE...${NC}"
    NAMESPACE=$TEST_NAMESPACE "$(dirname "$0")/setup-minio-k8s.sh"
else
    echo -e "${GREEN}✅ MinIO already running in $TEST_NAMESPACE namespace${NC}"

    # Verify MinIO is ready
    echo -e "${YELLOW}⏳ Checking MinIO status...${NC}"
    kubectl wait --for=condition=ready pod/minio -n "$TEST_NAMESPACE" --timeout=60s

    # Ensure bucket exists (best-effort; `mc mb --ignore-existing` is idempotent)
    echo -e "${YELLOW}📦 Ensuring bucket exists: $BUCKET_NAME${NC}"
    kubectl exec -n "$TEST_NAMESPACE" minio -- sh -c "
      mc alias set local http://localhost:9000 $MINIO_USER $MINIO_PASSWORD 2>/dev/null && \
      mc mb local/$BUCKET_NAME --ignore-existing 2>/dev/null
    " || echo "Bucket already exists or created"
fi

echo ""
echo -e "${BLUE}======================================${NC}"
echo -e "${BLUE}🧪 Running MSSQL Backup Test${NC}"
echo -e "${BLUE}======================================${NC}"
echo ""

# Run the test with MinIO configuration (same namespace)
NAMESPACE="$TEST_NAMESPACE" \
STATEFULSET_FILE="$(dirname "$0")/k8s-statefulset-test.yaml" \
S3_ENDPOINT="$MINIO_ENDPOINT" \
S3_ACCESS_KEY_ID="$MINIO_USER" \
S3_SECRET_ACCESS_KEY="$MINIO_PASSWORD" \
S3_BUCKET="$BUCKET_NAME" \
"$(dirname "$0")/test-mssql-k8s.sh"

echo ""
echo -e "${GREEN}🎉 All tests completed successfully!${NC}"
echo ""
echo -e "${BLUE}📊 View backups in MinIO:${NC}"
echo -e "  kubectl exec -n $TEST_NAMESPACE minio -- mc ls local/$BUCKET_NAME/mssql-backups/"
echo ""
echo -e "${BLUE}🌐 Access MinIO Console:${NC}"
echo -e "  kubectl port-forward -n $TEST_NAMESPACE pod/minio 9001:9001"
echo -e "  Then open: http://localhost:9001"
echo -e "  Login: $MINIO_USER / $MINIO_PASSWORD"
echo ""
echo -e "${BLUE}🧹 Cleanup (everything in one namespace):${NC}"
echo -e "  kubectl delete namespace $TEST_NAMESPACE"
echo ""

236
tests/test-mssql-k8s.sh Executable file
View File

@ -0,0 +1,236 @@
#!/bin/bash
# test-mssql-k8s.sh — end-to-end MSSQL backup/restore test in Kubernetes.
#
# Deploys the MSSQL + backup-sidecar StatefulSet, seeds a test database,
# runs backup.sh, mutates the data, runs restore.sh, and verifies the
# original rows come back. Also checks that GPG encryption was applied.
#
# Environment:
#   NAMESPACE         target namespace (default: mssql-backup-test)
#   STATEFULSET_FILE  manifest to deploy (default: sidecar manifest next to this script)
#   S3_BUCKET, S3_ENDPOINT, S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY, PASSPHRASE
set -e

# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

echo -e "${BLUE}🚀 Starting Kubernetes MSSQL backup test...${NC}"

# Configuration
NAMESPACE="${NAMESPACE:-mssql-backup-test}"
STATEFULSET_NAME="mssql"
POD_NAME="mssql-0"
MSSQL_PASSWORD="YourStrong@Passw0rd"
DATABASE_NAME="TestDB"
MSSQL_DATA_DIR="${MSSQL_DATA_DIR:-/var/opt/mssql/data}"
S3_BUCKET="${S3_BUCKET:-test-backups}"
S3_ENDPOINT="${S3_ENDPOINT:-}" # Set this if using MinIO or other S3-compatible storage
# NOTE: derived from "$0", so it may contain spaces — always expand quoted.
STATEFULSET_FILE="${STATEFULSET_FILE:-$(dirname "$0")/k8s-statefulset-with-sidecar.yaml}"

# Cleanup function — removes the test resources but leaves the namespace
# (and any MinIO running in it) intact.
cleanup() {
    echo ""
    echo -e "${YELLOW}✨ Cleaning up resources...${NC}"
    kubectl delete statefulset "$STATEFULSET_NAME" -n "$NAMESPACE" --ignore-not-found=true
    kubectl delete pvc -l app=mssql -n "$NAMESPACE" --ignore-not-found=true
    kubectl delete configmap mssql-config -n "$NAMESPACE" --ignore-not-found=true
    kubectl delete secret mssql-general -n "$NAMESPACE" --ignore-not-found=true
    # Optionally delete the namespace (uncomment to auto-delete)
    # kubectl delete namespace $NAMESPACE --ignore-not-found=true
    echo -e "${GREEN}🎉 Cleanup complete!${NC}"
    echo -e "${BLUE}💡 To delete the namespace (including MinIO if present):${NC}"
    echo -e "${BLUE}   kubectl delete namespace $NAMESPACE${NC}"
}

# Trap cleanup on exit (runs on success and on failure)
trap cleanup EXIT

echo ""
echo -e "${YELLOW}📦 Creating namespace: $NAMESPACE${NC}"
kubectl create namespace "$NAMESPACE" --dry-run=client -o yaml | kubectl apply -f -

echo ""
echo -e "${YELLOW}🧹 Cleaning up any existing resources in namespace...${NC}"
kubectl delete statefulset "$STATEFULSET_NAME" -n "$NAMESPACE" --ignore-not-found=true
kubectl delete pvc -l app=mssql -n "$NAMESPACE" --ignore-not-found=true
kubectl delete configmap mssql-config -n "$NAMESPACE" --ignore-not-found=true
kubectl delete secret mssql-general -n "$NAMESPACE" --ignore-not-found=true

# Wait for PVC to be deleted — bounded so a stuck finalizer cannot hang the
# test forever (60 polls x 2s = 2 minutes).
echo -e "${YELLOW}⏳ Waiting for PVC cleanup...${NC}"
for _ in $(seq 1 60); do
    if ! kubectl get pvc -l app=mssql -n "$NAMESPACE" 2>/dev/null | grep -q mssql; then
        break
    fi
    echo "Waiting for PVC to be deleted..."
    sleep 2
done
if kubectl get pvc -l app=mssql -n "$NAMESPACE" 2>/dev/null | grep -q mssql; then
    echo -e "${RED}❌ PVC was not deleted within timeout${NC}"
    exit 1
fi

echo ""
echo -e "${YELLOW}📝 Creating ConfigMap...${NC}"
kubectl create configmap mssql-config -n "$NAMESPACE" \
  --from-literal=DATABASE_SERVER='mssql' \
  --from-literal=DATABASE_HOST='mssql-service' \
  --from-literal=DATABASE_NAME="$DATABASE_NAME" \
  --from-literal=DATABASE_PORT='1433' \
  --from-literal=MSSQL_DATA_DIR='/var/opt/mssql/data' \
  --from-literal=SCHEDULE='*/5 * * * *' \
  --from-literal=BACKUP_KEEP_DAYS='7' \
  --from-literal=S3_BUCKET="$S3_BUCKET" \
  --from-literal=S3_PREFIX='mssql-backups' \
  --from-literal=S3_REGION='us-east-1' \
  ${S3_ENDPOINT:+--from-literal=S3_ENDPOINT="$S3_ENDPOINT"} \
  ${S3_ENDPOINT:+--from-literal=S3_S3V4='yes'}

echo ""
echo -e "${YELLOW}🔐 Creating Secret...${NC}"
kubectl create secret generic mssql-general -n "$NAMESPACE" \
  --from-literal=MSSQL_SA_PASSWORD="$MSSQL_PASSWORD" \
  --from-literal=DATABASE_USER='sa' \
  --from-literal=DATABASE_PASSWORD="$MSSQL_PASSWORD" \
  --from-literal=S3_ACCESS_KEY_ID="${S3_ACCESS_KEY_ID:-minioadmin}" \
  --from-literal=S3_SECRET_ACCESS_KEY="${S3_SECRET_ACCESS_KEY:-minioadmin}" \
  --from-literal=PASSPHRASE="${PASSPHRASE:-TestEncryptionPassphrase123}"

echo ""
echo -e "${YELLOW}📦 Deploying StatefulSet from $STATEFULSET_FILE...${NC}"
kubectl apply -f "$STATEFULSET_FILE" -n "$NAMESPACE"

echo ""
echo -e "${YELLOW}⏳ Waiting for pod to be ready (this may take 1-2 minutes)...${NC}"
kubectl wait --for=condition=ready pod/"$POD_NAME" -n "$NAMESPACE" --timeout=300s

echo ""
echo -e "${YELLOW}⏳ Waiting for MSSQL to be fully initialized...${NC}"
sleep 10

# Check if both containers are running
MSSQL_READY=$(kubectl get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath='{.status.containerStatuses[?(@.name=="mssql")].ready}')
BACKUP_READY=$(kubectl get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath='{.status.containerStatuses[?(@.name=="backup")].ready}')

if [ "$MSSQL_READY" != "true" ] || [ "$BACKUP_READY" != "true" ]; then
    echo -e "${RED}❌ Containers not ready!${NC}"
    echo "MSSQL ready: $MSSQL_READY"
    echo "Backup ready: $BACKUP_READY"
    kubectl describe pod "$POD_NAME" -n "$NAMESPACE"
    exit 1
fi

echo -e "${GREEN}✅ Pod is ready with both containers running!${NC}"

echo ""
echo -e "${YELLOW}🗄️  Creating test database...${NC}"
kubectl exec "$POD_NAME" -c mssql -n "$NAMESPACE" -- /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P "$MSSQL_PASSWORD" -C \
  -Q "CREATE DATABASE $DATABASE_NAME;"

echo ""
echo -e "${YELLOW}📝 Creating test table and inserting data...${NC}"
kubectl exec "$POD_NAME" -c mssql -n "$NAMESPACE" -- /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d "$DATABASE_NAME" \
  -Q "CREATE TABLE Users (id INT PRIMARY KEY, name VARCHAR(50)); INSERT INTO Users VALUES (1, 'John'), (2, 'Jane');"

echo ""
echo -e "${YELLOW}📊 Current data:${NC}"
kubectl exec "$POD_NAME" -c mssql -n "$NAMESPACE" -- /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d "$DATABASE_NAME" \
  -Q "SELECT * FROM Users;"

echo ""
echo -e "${YELLOW}💾 Running backup...${NC}"
kubectl exec "$POD_NAME" -c backup -n "$NAMESPACE" -- sh backup.sh

echo ""
echo -e "${YELLOW}📋 Checking backup container logs...${NC}"
kubectl logs "$POD_NAME" -c backup -n "$NAMESPACE" --tail=20

# Optional: List S3 backups if aws CLI is available in the backup container
echo ""
echo -e "${YELLOW}📋 Checking S3 for backups...${NC}"
if [ -n "$S3_ENDPOINT" ]; then
    kubectl exec "$POD_NAME" -c backup -n "$NAMESPACE" -- aws s3 ls "s3://$S3_BUCKET/mssql-backups/" --endpoint-url="$S3_ENDPOINT" 2>/dev/null || echo "Note: Could not list S3 bucket (this is OK for local testing)"
else
    kubectl exec "$POD_NAME" -c backup -n "$NAMESPACE" -- aws s3 ls "s3://$S3_BUCKET/mssql-backups/" 2>/dev/null || echo "Note: Could not list S3 bucket (this is OK for local testing)"
fi

echo ""
echo -e "${YELLOW}🔐 Verifying backup is encrypted...${NC}"
# Check the backup logs for encryption activity
BACKUP_LOGS=$(kubectl logs "$POD_NAME" -c backup -n "$NAMESPACE" --tail=100 2>/dev/null || echo "")
if echo "$BACKUP_LOGS" | grep -q "Encrypting backup"; then
    echo -e "${GREEN}✅ Backup encryption confirmed${NC}"
elif echo "$BACKUP_LOGS" | grep -q "\.bak\.gpg"; then
    echo -e "${GREEN}✅ Backup is encrypted (.gpg extension detected in logs)${NC}"
elif echo "$BACKUP_LOGS" | grep -q "\.dump\.gpg"; then
    echo -e "${GREEN}✅ Backup is encrypted (.gpg extension detected in logs)${NC}"
else
    # Final check: was PASSPHRASE set?
    PASSPHRASE_SET=$(kubectl exec "$POD_NAME" -c backup -n "$NAMESPACE" -- sh -c 'test -n "$PASSPHRASE" && echo "yes" || echo "no"' 2>/dev/null)
    if [ "$PASSPHRASE_SET" = "yes" ]; then
        echo -e "${YELLOW}⚠️  PASSPHRASE is set, but cannot confirm encryption from logs${NC}"
        echo -e "${YELLOW}   (Encryption should be active, will verify during restore)${NC}"
    else
        echo -e "${RED}❌ Warning: PASSPHRASE not set - backups are NOT encrypted${NC}"
    fi
fi

echo ""
echo -e "${YELLOW}🔨 Modifying database (deleting John)...${NC}"
kubectl exec "$POD_NAME" -c mssql -n "$NAMESPACE" -- /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d "$DATABASE_NAME" \
  -Q "DELETE FROM Users WHERE name = 'John';"

echo ""
echo -e "${YELLOW}📊 Current data after modification (should only show Jane):${NC}"
kubectl exec "$POD_NAME" -c mssql -n "$NAMESPACE" -- /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d "$DATABASE_NAME" \
  -Q "SELECT * FROM Users;"

echo ""
echo -e "${YELLOW}♻️  Restoring from backup...${NC}"
RESTORE_OUTPUT=$(kubectl exec "$POD_NAME" -c backup -n "$NAMESPACE" -- sh restore.sh 2>&1)
echo "$RESTORE_OUTPUT"

# Verify decryption happened during restore
if echo "$RESTORE_OUTPUT" | grep -q "Decrypting backup"; then
    echo -e "${GREEN}✅ Backup was successfully decrypted during restore${NC}"
elif echo "$RESTORE_OUTPUT" | grep -q "encrypted with 1 passphrase"; then
    echo -e "${GREEN}✅ GPG decryption confirmed${NC}"
fi

echo ""
echo -e "${YELLOW}📊 Data after restore (should show both John and Jane):${NC}"
kubectl exec "$POD_NAME" -c mssql -n "$NAMESPACE" -- /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d "$DATABASE_NAME" \
  -Q "SELECT * FROM Users;"

echo ""
echo -e "${YELLOW}🔍 Verifying restoration...${NC}"
# -h -1 suppresses headers, SET NOCOUNT ON suppresses the "(N rows affected)"
# trailer, so the output reduces to the bare count.
RECORD_COUNT=$(kubectl exec "$POD_NAME" -c mssql -n "$NAMESPACE" -- /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P "$MSSQL_PASSWORD" -C -d "$DATABASE_NAME" -h -1 -W \
  -Q "SET NOCOUNT ON; SELECT COUNT(*) FROM Users;" | grep -v '^$' | tr -d '[:space:]')

if [ "$RECORD_COUNT" = "2" ]; then
    echo -e "${GREEN}✅ Success! Both records were restored correctly.${NC}"
else
    echo -e "${RED}❌ Failed! Expected 2 records, found: $RECORD_COUNT${NC}"
    exit 1
fi

echo ""
echo -e "${YELLOW}📊 Checking resource usage...${NC}"
kubectl top pod "$POD_NAME" -n "$NAMESPACE" --containers 2>/dev/null || echo "Note: Metrics server not available"

echo ""
echo -e "${GREEN}🎉 All tests passed!${NC}"
echo ""
echo -e "${BLUE}Additional commands you can try:${NC}"
echo -e "  ${YELLOW}# View MSSQL logs:${NC}"
echo -e "  kubectl logs $POD_NAME -c mssql -n $NAMESPACE"
echo ""
echo -e "  ${YELLOW}# View backup logs:${NC}"
echo -e "  kubectl logs $POD_NAME -c backup -n $NAMESPACE"
echo ""
echo -e "  ${YELLOW}# Execute manual backup:${NC}"
echo -e "  kubectl exec $POD_NAME -c backup -n $NAMESPACE -- sh backup.sh"
echo ""
echo -e "  ${YELLOW}# Connect to MSSQL:${NC}"
echo -e "  kubectl exec -it $POD_NAME -c mssql -n $NAMESPACE -- /opt/mssql-tools18/bin/sqlcmd -S localhost -U sa -P '$MSSQL_PASSWORD' -C"
echo ""
echo -e "  ${YELLOW}# Check disk usage:${NC}"
echo -e "  kubectl exec $POD_NAME -c backup -n $NAMESPACE -- df -h /var/opt/mssql/data"
echo ""

88
tests/test-mssql.sh Executable file
View File

@ -0,0 +1,88 @@
#!/bin/bash
# test-mssql.sh — local docker-compose smoke test of the MSSQL backup/restore
# flow: start MinIO + MSSQL, seed data, back up, mutate, restore, compare.
set -e  # Exit on error (but we'll handle specific commands)

echo "🚀 Starting test of MSSQL backup functionality..."

echo "🧹 Cleaning up any existing containers..."
docker compose down -v 2>/dev/null || true

echo "📦 Starting MinIO and MSSQL..."
docker compose up -d minio mssql

echo "⏳ Waiting for MinIO to be ready..."
# Poll up to 10s; track readiness explicitly so we can abort with a clear
# error instead of silently continuing into guaranteed failures.
MINIO_READY=false
for _ in {1..10}; do
    if docker exec db-backup-s3-minio-1 mc alias set local http://localhost:9000 miniouser minioroot &>/dev/null; then
        MINIO_READY=true
        echo "✅ MinIO is ready!"
        break
    fi
    echo -n "."
    sleep 1
done
echo ""
if [ "$MINIO_READY" != "true" ]; then
    echo "❌ MinIO did not become ready in time"
    exit 1
fi

echo "📦 Creating backups bucket..."
docker exec db-backup-s3-minio-1 mc mb local/backups --ignore-existing || true

echo "⏳ Waiting for MSSQL to be ready (this takes about 30 seconds)..."
MSSQL_READY=false
for _ in {1..30}; do
    if docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \
        -S localhost -U sa -P 'YourStrong@Passw0rd' -C \
        -Q "SELECT 1" &>/dev/null; then
        MSSQL_READY=true
        echo "✅ MSSQL is ready!"
        break
    fi
    echo -n "."
    sleep 1
done
echo ""
if [ "$MSSQL_READY" != "true" ]; then
    echo "❌ MSSQL did not become ready in time"
    exit 1
fi

echo "🗄️  Creating test database..."
docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P 'YourStrong@Passw0rd' -C \
  -Q "CREATE DATABASE TestDB;"

echo "📝 Creating test table and inserting data..."
docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P 'YourStrong@Passw0rd' -C -d TestDB \
  -Q "CREATE TABLE Users (id INT, name VARCHAR(50)); INSERT INTO Users VALUES (1, 'John'), (2, 'Jane');"

echo "📊 Current data:"
docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P 'YourStrong@Passw0rd' -C -d TestDB \
  -Q "SELECT * FROM Users;"

echo ""
echo "💾 Running backup..."
docker compose run --rm backup-mssql sh backup.sh

echo ""
echo "📋 Checking MinIO for backup..."
echo "Backups in bucket:"
docker exec db-backup-s3-minio-1 mc ls local/backups/mssql-backups/

echo ""
echo "🔨 Modifying database (deleting John)..."
docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P 'YourStrong@Passw0rd' -C -d TestDB \
  -Q "DELETE FROM Users WHERE id = 1;"

echo "📊 Current data after modification (should only show Jane):"
docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P 'YourStrong@Passw0rd' -C -d TestDB \
  -Q "SELECT * FROM Users;"

echo ""
echo "♻️  Restoring from backup..."
docker compose run --rm backup-mssql sh restore.sh

echo ""
echo "📊 Data after restore (should show both John and Jane):"
docker exec db-backup-s3-mssql-1 /opt/mssql-tools18/bin/sqlcmd \
  -S localhost -U sa -P 'YourStrong@Passw0rd' -C -d TestDB \
  -Q "SELECT * FROM Users;"

echo ""
echo "✨ Test complete! Cleaning up..."
# Left commented so the environment can be inspected after the run:
# docker compose down -v

echo "🎉 All done!"