From 5c83420ad1340e6be97767a8b91a7547c72ec5c9 Mon Sep 17 00:00:00 2001
From: zhangzhenghao
Date: Fri, 3 Apr 2026 21:16:01 +0800
Subject: [PATCH 1/2] Add backup retention policy and streaming compression

- Keep only latest 7 backups on S3
- Use streaming compression (mysqldump | gzip) to reduce disk I/O
- No intermediate uncompressed SQL file needed

Benefits:
- Prevents unlimited backup accumulation
- Reduces S3 storage costs
- Faster backup with less disk space usage
---
 scripts/backup.sh | 35 +++++++++++++++++++++++++++--------
 1 file changed, 27 insertions(+), 8 deletions(-)

diff --git a/scripts/backup.sh b/scripts/backup.sh
index 5bde071..3985ad2 100644
--- a/scripts/backup.sh
+++ b/scripts/backup.sh
@@ -2,13 +2,10 @@
 set -e
 
 while true; do
-    SQL_FILE=$(date '+%Y-%m-%d.%H').sql
+    SQL_FILE=$(date '+%Y-%m-%d.%H').sql.gz
 
-    # Dump database
-    mysqldump --no-tablespaces -h ${MYSQL_HOST:=127.0.0.1} -u ${MYSQL_USER:=gorse} -p${MYSQL_PASSWORD:=gorse_pass} --ssl-verify-server-cert=0 ${MYSQL_DATABASE:=gorse} users items feedback flask_dance_oauth > $SQL_FILE
-
-    # Compress SQL file
-    gzip $SQL_FILE
+    # Dump and compress database in one stream
+    mysqldump --no-tablespaces -h ${MYSQL_HOST:=127.0.0.1} -u ${MYSQL_USER:=gorse} -p${MYSQL_PASSWORD:=gorse_pass} --ssl-verify-server-cert=0 ${MYSQL_DATABASE:=gorse} users items feedback flask_dance_oauth | gzip > $SQL_FILE
 
     # Upload SQL file
     s3cmd --access_key=$S3_ACCESS_KEY \
@@ -16,10 +13,32 @@ while true; do
         --region=$S3_BUCKET_LOCATION \
         --host=$S3_HOST_BASE \
         --host-bucket=$S3_HOST_BUCKET \
-        put ${SQL_FILE}.gz s3://${S3_BUCKET}${S3_PREFIX}/${SQL_FILE}.gz
+        put $SQL_FILE s3://${S3_BUCKET}${S3_PREFIX}/$SQL_FILE
 
     # Remove local SQL file
-    rm *.sql.gz
+    rm $SQL_FILE
+
+    # Keep only the latest 7 backups on remote
+    BACKUP_FILES=$(s3cmd --access_key=$S3_ACCESS_KEY \
+        --secret_key=$S3_SECRET_KEY \
+        --region=$S3_BUCKET_LOCATION \
+        --host=$S3_HOST_BASE \
+        --host-bucket=$S3_HOST_BUCKET \
+        ls s3://${S3_BUCKET}${S3_PREFIX}/ | grep '\.sql\.gz$' | sort -r)
+
+    # Count and delete old backups
+    COUNT=0
+    for FILE in $BACKUP_FILES; do
+        COUNT=$((COUNT + 1))
+        if [ $COUNT -gt 7 ]; then
+            s3cmd --access_key=$S3_ACCESS_KEY \
+                --secret_key=$S3_SECRET_KEY \
+                --region=$S3_BUCKET_LOCATION \
+                --host=$S3_HOST_BASE \
+                --host-bucket=$S3_HOST_BUCKET \
+                del $FILE
+        fi
+    done
 
     # Backup 1 day later.
     sleep 86400

From 31dd2578df9f1557292ad46369d736a85c3b6042 Mon Sep 17 00:00:00 2001
From: Zhenghao Zhang
Date: Sat, 11 Apr 2026 13:33:01 +0800
Subject: [PATCH 2/2] fix: correct backup file listing command in backup script

---
 scripts/backup.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/backup.sh b/scripts/backup.sh
index 3985ad2..13eaa5d 100644
--- a/scripts/backup.sh
+++ b/scripts/backup.sh
@@ -24,7 +24,7 @@ while true; do
         --region=$S3_BUCKET_LOCATION \
         --host=$S3_HOST_BASE \
         --host-bucket=$S3_HOST_BUCKET \
-        ls s3://${S3_BUCKET}${S3_PREFIX}/ | grep '\.sql\.gz$' | sort -r)
+        ls s3://${S3_BUCKET}${S3_PREFIX}/ | grep '\.sql\.gz$' | sort -r | awk '{print $4}')
 
     # Count and delete old backups
     COUNT=0