diff --git a/.gitignore b/.gitignore index 3bef3410..0efe6985 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,12 @@ _testmain.go gpbackup gprestore gpbackup_helper +gpbackman + +!gpbackman/ + +# vscode +.vscode/ # Logs *.log diff --git a/Makefile b/Makefile index a5c3138a..c1f8cf05 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,7 @@ BACKUP=gpbackup RESTORE=gprestore HELPER=gpbackup_helper S3PLUGIN=gpbackup_s3_plugin +GPBACKMAN=gpbackman BIN_DIR=$(shell echo $${GOPATH:-~/go} | awk -F':' '{ print $$1 "/bin"}') GINKGO_FLAGS := -r --keep-going --randomize-suites --randomize-all --no-color GIT_VERSION := $(shell v=$$(git describe --tags 2>/dev/null); if [ -n "$$v" ]; then echo $$v | perl -pe 's/(.*)-([0-9]*)-(g[0-9a-f]*)/\1+dev.\2.\3/'; else cat VERSION 2>/dev/null || echo "dev"; fi) @@ -16,9 +17,10 @@ BACKUP_VERSION_STR=github.com/apache/cloudberry-backup/backup.version=$(GIT_VERS RESTORE_VERSION_STR=github.com/apache/cloudberry-backup/restore.version=$(GIT_VERSION) HELPER_VERSION_STR=github.com/apache/cloudberry-backup/helper.version=$(GIT_VERSION) S3PLUGIN_VERSION_STR=github.com/apache/cloudberry-backup/plugins/s3plugin.version=$(GIT_VERSION) +GPBACKMAN_VERSION_STR=github.com/apache/cloudberry-backup/gpbackman/cmd.version=$(GIT_VERSION) # note that /testutils is not a production directory, but has unit tests to validate testing tools -SUBDIRS_HAS_UNIT=backup/ filepath/ history/ helper/ options/ report/ restore/ toc/ utils/ testutils/ plugins/s3plugin/ +SUBDIRS_HAS_UNIT=backup/ filepath/ history/ helper/ options/ report/ restore/ toc/ utils/ testutils/ plugins/s3plugin/ gpbackman/cmd/ gpbackman/gpbckpconfig/ gpbackman/textmsg/ SUBDIRS_ALL=$(SUBDIRS_HAS_UNIT) integration/ end_to_end/ GOLANG_LINTER=$(GOPATH)/bin/golangci-lint GINKGO=$(GOPATH)/bin/ginkgo @@ -87,21 +89,24 @@ build : $(GOSQLITE) CGO_ENABLED=1 $(GO_BUILD) -tags '$(RESTORE)' -o $(BIN_DIR)/$(RESTORE) --ldflags '-X $(RESTORE_VERSION_STR)' CGO_ENABLED=1 $(GO_BUILD) -tags '$(HELPER)' -o 
$(BIN_DIR)/$(HELPER) --ldflags '-X $(HELPER_VERSION_STR)' CGO_ENABLED=1 $(GO_BUILD) -tags '$(S3PLUGIN)' -o $(BIN_DIR)/$(S3PLUGIN) --ldflags '-X $(S3PLUGIN_VERSION_STR)' + CGO_ENABLED=1 $(GO_BUILD) -tags '$(GPBACKMAN)' -o $(BIN_DIR)/$(GPBACKMAN) --ldflags '-X $(GPBACKMAN_VERSION_STR)' debug : CGO_ENABLED=1 $(GO_BUILD) -tags '$(BACKUP)' -o $(BIN_DIR)/$(BACKUP) -ldflags "-X $(BACKUP_VERSION_STR)" $(DEBUG) CGO_ENABLED=1 $(GO_BUILD) -tags '$(RESTORE)' -o $(BIN_DIR)/$(RESTORE) -ldflags "-X $(RESTORE_VERSION_STR)" $(DEBUG) CGO_ENABLED=1 $(GO_BUILD) -tags '$(HELPER)' -o $(BIN_DIR)/$(HELPER) -ldflags "-X $(HELPER_VERSION_STR)" $(DEBUG) CGO_ENABLED=1 $(GO_BUILD) -tags '$(S3PLUGIN)' -o $(BIN_DIR)/$(S3PLUGIN) -ldflags "-X $(S3PLUGIN_VERSION_STR)" $(DEBUG) + CGO_ENABLED=1 $(GO_BUILD) -tags '$(GPBACKMAN)' -o $(BIN_DIR)/$(GPBACKMAN) -ldflags "-X $(GPBACKMAN_VERSION_STR)" $(DEBUG) build_linux : env GOOS=linux GOARCH=amd64 $(GO_BUILD) -tags '$(BACKUP)' -o $(BACKUP) -ldflags "-X $(BACKUP_VERSION_STR)" env GOOS=linux GOARCH=amd64 $(GO_BUILD) -tags '$(RESTORE)' -o $(RESTORE) -ldflags "-X $(RESTORE_VERSION_STR)" env GOOS=linux GOARCH=amd64 $(GO_BUILD) -tags '$(HELPER)' -o $(HELPER) -ldflags "-X $(HELPER_VERSION_STR)" env GOOS=linux GOARCH=amd64 $(GO_BUILD) -tags '$(S3PLUGIN)' -o $(S3PLUGIN) -ldflags "-X $(S3PLUGIN_VERSION_STR)" + env GOOS=linux GOARCH=amd64 $(GO_BUILD) -tags '$(GPBACKMAN)' -o $(GPBACKMAN) -ldflags "-X $(GPBACKMAN_VERSION_STR)" install : - cp $(BIN_DIR)/$(BACKUP) $(BIN_DIR)/$(RESTORE) $(GPHOME)/bin + cp $(BIN_DIR)/$(BACKUP) $(BIN_DIR)/$(RESTORE) $(BIN_DIR)/$(GPBACKMAN) $(GPHOME)/bin @psql -X -t -d template1 -c 'select distinct hostname from gp_segment_configuration where content != -1' > /tmp/seg_hosts 2>/dev/null; \ if [ $$? 
-eq 0 ]; then \ $(COPYUTIL) -f /tmp/seg_hosts $(helper_path) $(s3plugin_path) =:$(GPHOME)/bin/; \ @@ -119,7 +124,7 @@ install : clean : # Build artifacts - rm -f $(BIN_DIR)/$(BACKUP) $(BACKUP) $(BIN_DIR)/$(RESTORE) $(RESTORE) $(BIN_DIR)/$(HELPER) $(HELPER) $(BIN_DIR)/$(S3PLUGIN) $(S3PLUGIN) + rm -f $(BIN_DIR)/$(BACKUP) $(BACKUP) $(BIN_DIR)/$(RESTORE) $(RESTORE) $(BIN_DIR)/$(HELPER) $(HELPER) $(BIN_DIR)/$(S3PLUGIN) $(S3PLUGIN) $(BIN_DIR)/$(GPBACKMAN) $(GPBACKMAN) # Test artifacts rm -rf /tmp/go-build* /tmp/gexec_artifacts* /tmp/ginkgo* docker stop s3-minio # stop minio before removing its data directories @@ -178,6 +183,7 @@ package: @GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO) go build -tags '$(RESTORE)' -o $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/bin/$(RESTORE) --ldflags '-X $(RESTORE_VERSION_STR)' @GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO) go build -tags '$(HELPER)' -o $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/bin/$(HELPER) --ldflags '-X $(HELPER_VERSION_STR)' @GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO) go build -tags '$(S3PLUGIN)' -o $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/bin/$(S3PLUGIN) --ldflags '-X $(S3PLUGIN_VERSION_STR)' + @GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=$(CGO) go build -tags '$(GPBACKMAN)' -o $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/bin/$(GPBACKMAN) --ldflags '-X $(GPBACKMAN_VERSION_STR)' @echo "Creating install script..." 
@echo '#!/bin/bash' > $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/install.sh @echo 'set -e' >> $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/install.sh @@ -203,6 +209,7 @@ package: @echo 'sudo chmod 755 "$${INSTALL_DIR}/bin/$(RESTORE)"' >> $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/install.sh @echo 'sudo chmod 755 "$${INSTALL_DIR}/bin/$(HELPER)"' >> $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/install.sh @echo 'sudo chmod 755 "$${INSTALL_DIR}/bin/$(S3PLUGIN)"' >> $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/install.sh + @echo 'sudo chmod 755 "$${INSTALL_DIR}/bin/$(GPBACKMAN)"' >> $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/install.sh @echo '' >> $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/install.sh @echo 'echo "Installation complete!"' >> $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/install.sh @echo 'echo "$(PACKAGE_NAME) binaries installed to $${INSTALL_DIR}/bin/"' >> $(BUILD_DIR)/$(PACKAGE_NAME)-$(PACKAGE_VERSION)-$(GOOS)-$(GOARCH)/install.sh diff --git a/end_to_end/end_to_end_suite_test.go b/end_to_end/end_to_end_suite_test.go index 5738e924..6a71a0d6 100644 --- a/end_to_end/end_to_end_suite_test.go +++ b/end_to_end/end_to_end_suite_test.go @@ -1,6 +1,7 @@ package end_to_end_test import ( + "database/sql" "encoding/csv" "flag" "fmt" @@ -30,6 +31,7 @@ import ( "github.com/apache/cloudberry-go-libs/structmatcher" "github.com/apache/cloudberry-go-libs/testhelper" "github.com/blang/semver" + _ "github.com/mattn/go-sqlite3" "github.com/pkg/errors" "github.com/spf13/pflag" @@ -69,6 +71,7 @@ var ( schema2TupleCounts map[string]int backupDir string segmentCount int + gpbackmanPath string ) const ( @@ -463,6 +466,38 @@ func moveSegmentBackupFiles(tarBaseName string, extractDirectory string, isMulti } } +// gpbackman helpers + +func gpbackman(args ...string) []byte { + command := 
exec.Command(gpbackmanPath, args...) + return mustRunCommand(command) +} + +func gpbackmanWithError(args ...string) ([]byte, error) { + command := exec.Command(gpbackmanPath, args...) + return command.CombinedOutput() +} + +func getHistoryDBPathForCluster() string { + mdd := backupCluster.GetDirForContent(-1) + return path.Join(mdd, "gpbackup_history.db") +} + +// queryHistoryDB runs an SQL query against gpbackup_history.db using database/sql and returns the trimmed output. +func queryHistoryDB(historyDB string, query string) string { + db, err := sql.Open("sqlite3", historyDB) + Expect(err).ToNot(HaveOccurred()) + defer db.Close() + + var result string + err = db.QueryRow(query).Scan(&result) + if err == sql.ErrNoRows { + return "" + } + Expect(err).ToNot(HaveOccurred()) + return strings.TrimSpace(result) +} + func TestEndToEnd(t *testing.T) { format.MaxLength = 0 RegisterFailHandler(Fail) @@ -524,6 +559,7 @@ options: oldBackupVersionStr := os.Getenv("OLD_BACKUP_VERSION") _, restoreHelperPath, gprestorePath = buildAndInstallBinaries() + gpbackmanPath = fmt.Sprintf("%s/go/bin/gpbackman", operating.System.Getenv("HOME")) // Precompiled binaries will exist when running the ci job, `backward-compatibility` if _, err := os.Stat(fmt.Sprintf("/tmp/%s", oldBackupVersionStr)); err == nil { @@ -544,6 +580,7 @@ options: gprestorePath = fmt.Sprintf("%s/gprestore", binDir) backupHelperPath = fmt.Sprintf("%s/gpbackup_helper", binDir) restoreHelperPath = backupHelperPath + gpbackmanPath = fmt.Sprintf("%s/gpbackman", binDir) } segConfig := cluster.MustGetSegmentConfiguration(backupConn) backupCluster = cluster.NewCluster(segConfig) diff --git a/end_to_end/gpbackman_test.go b/end_to_end/gpbackman_test.go new file mode 100644 index 00000000..488bf644 --- /dev/null +++ b/end_to_end/gpbackman_test.go @@ -0,0 +1,724 @@ +package end_to_end_test + +import ( + "fmt" + "strings" + + "github.com/apache/cloudberry-backup/filepath" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +// countBackupInfoLines counts the number of backup entry rows in backup-info +// output. Data rows contain '|' separators but do not contain the header +// label "TIMESTAMP". +func countBackupInfoLines(output []byte) int { + count := 0 + for _, line := range strings.Split(string(output), "\n") { + trimmed := strings.TrimSpace(line) + if trimmed == "" { + continue + } + if strings.Contains(trimmed, "|") && + !strings.Contains(trimmed, "TIMESTAMP") && + !isSeparatorLine(trimmed) { + count++ + } + } + return count +} + +// isSeparatorLine returns true for lines like "---+---+---". +func isSeparatorLine(line string) bool { + for _, c := range line { + if c != '-' && c != '+' && c != ' ' { + return false + } + } + return true +} + +var _ = Describe("gpbackman end to end tests", func() { + BeforeEach(func() { + if useOldBackupVersion { + Skip("gpbackman tests are not applicable in old backup version mode") + } + }) + + // ------------------------------------------------------------------ // + // backup-info + // ------------------------------------------------------------------ // + Describe("backup-info", func() { + var ( + historyDB string + timestampMap map[string]string + ) + + BeforeEach(func() { + end_to_end_setup() + historyDB = getHistoryDBPathForCluster() + timestampMap = make(map[string]string) + + // 1. Full local backup (with --leaf-partition-data for incremental compatibility) + output := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir, + "--leaf-partition-data") + timestampMap["full_local"] = getBackupTimestamp(string(output)) + + // 2. Full local backup with --include-table + output = gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir, + "--include-table", "public.foo") + timestampMap["full_include_table"] = getBackupTimestamp(string(output)) + + // 3. 
Full local backup with --exclude-schema + output = gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir, + "--exclude-schema", "schema2") + timestampMap["full_exclude_schema"] = getBackupTimestamp(string(output)) + + // 4. Incremental local backup (depends on full_local) + output = gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir, + "--incremental", + "--leaf-partition-data") + timestampMap["incremental"] = getBackupTimestamp(string(output)) + + // 5. Metadata-only backup + output = gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir, + "--metadata-only") + timestampMap["metadata_only"] = getBackupTimestamp(string(output)) + }) + + AfterEach(func() { + end_to_end_teardown() + }) + + It("lists all backups", func() { + output := gpbackman( + "backup-info", + "--history-db", historyDB, + ) + lines := countBackupInfoLines(output) + Expect(lines).To(BeNumerically(">=", 5), + fmt.Sprintf("Expected at least 5 backup entries, got %d.\nOutput:\n%s", + lines, string(output))) + }) + + It("filters by type full", func() { + output := gpbackman( + "backup-info", + "--history-db", historyDB, + "--type", "full", + ) + Expect(string(output)).To(ContainSubstring("full")) + Expect(string(output)).ToNot(ContainSubstring("incremental")) + }) + + It("filters by type incremental", func() { + output := gpbackman( + "backup-info", + "--history-db", historyDB, + "--type", "incremental", + ) + Expect(string(output)).To(ContainSubstring("incremental")) + lines := countBackupInfoLines(output) + Expect(lines).To(BeNumerically(">=", 1), + "Expected at least 1 incremental backup") + }) + + It("filters by type metadata-only", func() { + output := gpbackman( + "backup-info", + "--history-db", historyDB, + "--type", "metadata-only", + ) + Expect(string(output)).To(ContainSubstring("metadata-only")) + lines := countBackupInfoLines(output) + Expect(lines).To(BeNumerically(">=", 1), + "Expected at least 1 metadata-only backup") + }) + + It("filters 
by include-table", func() { + output := gpbackman( + "backup-info", + "--history-db", historyDB, + "--table", "public.foo", + ) + Expect(string(output)).To(ContainSubstring(timestampMap["full_include_table"])) + }) + + It("filters by exclude-schema", func() { + output := gpbackman( + "backup-info", + "--history-db", historyDB, + "--schema", "schema2", + "--exclude", + ) + Expect(string(output)).To(ContainSubstring(timestampMap["full_exclude_schema"])) + }) + + It("shows backup chain with --timestamp", func() { + output := gpbackman( + "backup-info", + "--history-db", historyDB, + "--timestamp", timestampMap["full_local"], + ) + outputStr := string(output) + Expect(outputStr).To(ContainSubstring(timestampMap["full_local"]), + "Expected the specified backup timestamp in the output") + }) + + It("shows detail with --timestamp --detail", func() { + output := gpbackman( + "backup-info", + "--history-db", historyDB, + "--timestamp", timestampMap["incremental"], + "--detail", + ) + Expect(string(output)).To(Or( + ContainSubstring("OBJECT FILTERING"), + ContainSubstring("object filtering"), + )) + }) + + It("shows detail for all backups with --detail", func() { + output := gpbackman( + "backup-info", + "--history-db", historyDB, + "--detail", + ) + Expect(string(output)).To(Or( + ContainSubstring("OBJECT FILTERING"), + ContainSubstring("object filtering"), + )) + Expect(string(output)).To(ContainSubstring("public.foo")) + }) + + It("rejects incompatible flags --timestamp with --type", func() { + _, err := gpbackmanWithError( + "backup-info", + "--history-db", historyDB, + "--timestamp", timestampMap["full_local"], + "--type", "full", + ) + Expect(err).To(HaveOccurred()) + }) + + It("rejects invalid timestamp format", func() { + _, err := gpbackmanWithError( + "backup-info", + "--history-db", historyDB, + "--timestamp", "invalid", + ) + Expect(err).To(HaveOccurred()) + }) + }) + + // ------------------------------------------------------------------ // + // report-info + // 
------------------------------------------------------------------ // + Describe("report-info", func() { + var ( + historyDB string + timestampMap map[string]string + ) + + BeforeEach(func() { + end_to_end_setup() + historyDB = getHistoryDBPathForCluster() + timestampMap = make(map[string]string) + + // 1. Full local backup + output := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestampMap["full_local"] = getBackupTimestamp(string(output)) + + // 2. Plugin backup using example_plugin + copyPluginToAllHosts(backupConn, examplePluginExec) + output = gpbackup(gpbackupPath, backupHelperPath, + "--plugin-config", examplePluginTestConfig) + timestampMap["plugin"] = getBackupTimestamp(string(output)) + }) + + AfterEach(func() { + end_to_end_teardown() + }) + + It("displays local backup report with --backup-dir", func() { + output := gpbackman( + "report-info", + "--history-db", historyDB, + "--timestamp", timestampMap["full_local"], + "--backup-dir", backupDir, + ) + Expect(string(output)).To(ContainSubstring("Backup Report")) + Expect(string(output)).To(ContainSubstring(timestampMap["full_local"])) + }) + + It("displays local backup report without --backup-dir", func() { + output := gpbackman( + "report-info", + "--history-db", historyDB, + "--timestamp", timestampMap["full_local"], + ) + Expect(string(output)).To(ContainSubstring("Backup Report")) + }) + + It("displays plugin backup report", func() { + ts := timestampMap["plugin"] + reportDir := fmt.Sprintf("/tmp/plugin_dest/%s/%s", ts[:8], ts) + output := gpbackman( + "report-info", + "--history-db", historyDB, + "--timestamp", ts, + "--plugin-config", examplePluginTestConfig, + "--plugin-report-file-path", reportDir, + ) + Expect(string(output)).To(ContainSubstring("Backup Report")) + Expect(string(output)).To(ContainSubstring(ts)) + }) + + It("rejects --plugin-report-file-path without --plugin-config", func() { + _, err := gpbackmanWithError( + "report-info", + "--history-db", historyDB, 
+ "--timestamp", timestampMap["full_local"], + "--plugin-report-file-path", "/tmp/fake_report", + ) + Expect(err).To(HaveOccurred()) + }) + }) + + // ------------------------------------------------------------------ // + // backup-delete + // ------------------------------------------------------------------ // + Describe("backup-delete", func() { + var historyDB string + + BeforeEach(func() { + end_to_end_setup() + historyDB = getHistoryDBPathForCluster() + }) + + AfterEach(func() { + end_to_end_teardown() + }) + + It("deletes a local backup by timestamp", func() { + output := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp := getBackupTimestamp(string(output)) + + gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", timestamp, + "--backup-dir", backupDir, + ) + + dateDeleted := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT date_deleted FROM backups WHERE timestamp = '%s'", timestamp)) + Expect(dateDeleted).ToNot(BeEmpty()) + Expect(dateDeleted).ToNot(Equal("In progress")) + + fpInfo := filepath.NewFilePathInfo(backupCluster, backupDir, timestamp, "", false) + backupDirCoordinator := fpInfo.GetDirForContent(-1) + Expect(backupDirCoordinator).ToNot(BeADirectory()) + }) + + It("deletes a local backup without --backup-dir", func() { + output := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp := getBackupTimestamp(string(output)) + + gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", timestamp, + ) + + dateDeleted := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT date_deleted FROM backups WHERE timestamp = '%s'", timestamp)) + Expect(dateDeleted).ToNot(BeEmpty()) + }) + + It("deletes with --cascade for incremental chain", func() { + fullOutput := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir, + "--leaf-partition-data") + fullTimestamp := getBackupTimestamp(string(fullOutput)) + + incrOutput := gpbackup(gpbackupPath, 
backupHelperPath, + "--backup-dir", backupDir, + "--incremental", + "--leaf-partition-data") + incrTimestamp := getBackupTimestamp(string(incrOutput)) + + gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", fullTimestamp, + "--backup-dir", backupDir, + "--cascade", + ) + + fullDeleted := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT date_deleted FROM backups WHERE timestamp = '%s'", fullTimestamp)) + Expect(fullDeleted).ToNot(BeEmpty()) + + incrDeleted := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT date_deleted FROM backups WHERE timestamp = '%s'", incrTimestamp)) + Expect(incrDeleted).ToNot(BeEmpty()) + }) + + It("deletes a plugin backup", func() { + copyPluginToAllHosts(backupConn, examplePluginExec) + + output := gpbackup(gpbackupPath, backupHelperPath, + "--plugin-config", examplePluginTestConfig) + timestamp := getBackupTimestamp(string(output)) + + gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", timestamp, + "--plugin-config", examplePluginTestConfig, + ) + + dateDeleted := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT date_deleted FROM backups WHERE timestamp = '%s'", timestamp)) + Expect(dateDeleted).ToNot(BeEmpty()) + Expect(dateDeleted).ToNot(Equal("In progress")) + }) + + It("fails for non-existent timestamp", func() { + _, err := gpbackmanWithError( + "backup-delete", + "--history-db", historyDB, + "--timestamp", "29991231235959", + "--backup-dir", backupDir, + ) + Expect(err).To(HaveOccurred()) + }) + + It("skips already-deleted backup without --force", func() { + output := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp := getBackupTimestamp(string(output)) + + gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", timestamp, + "--backup-dir", backupDir, + ) + + // Second delete without --force should succeed without error. + // gpbackman silently skips already-deleted backups (logged at debug level). 
+ gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", timestamp, + "--backup-dir", backupDir, + ) + + dateDeleted := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT date_deleted FROM backups WHERE timestamp = '%s'", timestamp)) + Expect(dateDeleted).ToNot(BeEmpty(), + "Backup should still be marked as deleted after second delete attempt") + }) + + It("re-deletes with --force", func() { + output := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp := getBackupTimestamp(string(output)) + + gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", timestamp, + "--backup-dir", backupDir, + ) + + gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", timestamp, + "--backup-dir", backupDir, + "--force", + "--ignore-errors", + ) + + dateDeleted := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT date_deleted FROM backups WHERE timestamp = '%s'", timestamp)) + Expect(dateDeleted).ToNot(BeEmpty()) + }) + }) + + // ------------------------------------------------------------------ // + // backup-delete: local file cleanup on segments + // ------------------------------------------------------------------ // + Describe("backup-delete local file cleanup", func() { + var historyDB string + + BeforeEach(func() { + end_to_end_setup() + historyDB = getHistoryDBPathForCluster() + }) + + AfterEach(func() { + end_to_end_teardown() + }) + + It("removes backup files after deletion", func() { + output := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir, + "--single-backup-dir") + timestamp := getBackupTimestamp(string(output)) + + fpInfo := filepath.NewFilePathInfo(backupCluster, backupDir, timestamp, "", true) + backupTimestampDir := fpInfo.GetDirForContent(-1) + Expect(backupTimestampDir).To(BeADirectory(), + "Backup directory should exist before deletion") + + gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", timestamp, + "--backup-dir", 
backupDir, + ) + + Expect(backupTimestampDir).ToNot(BeADirectory(), + "Backup directory should be removed after deletion") + }) + }) + + // ------------------------------------------------------------------ // + // backup-clean + // ------------------------------------------------------------------ // + Describe("backup-clean", func() { + var historyDB string + + BeforeEach(func() { + end_to_end_setup() + historyDB = getHistoryDBPathForCluster() + }) + + AfterEach(func() { + end_to_end_teardown() + }) + + It("cleans local backups with --before-timestamp", func() { + output1 := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp1 := getBackupTimestamp(string(output1)) + + output2 := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp2 := getBackupTimestamp(string(output2)) + + gpbackman( + "backup-clean", + "--history-db", historyDB, + "--before-timestamp", timestamp2, + "--backup-dir", backupDir, + ) + + dateDeleted1 := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT date_deleted FROM backups WHERE timestamp = '%s'", timestamp1)) + Expect(dateDeleted1).ToNot(BeEmpty(), + fmt.Sprintf("Expected backup %s to be deleted", timestamp1)) + }) + + It("cleans local backups with --after-timestamp", func() { + output1 := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp1 := getBackupTimestamp(string(output1)) + + output2 := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp2 := getBackupTimestamp(string(output2)) + + gpbackman( + "backup-clean", + "--history-db", historyDB, + "--after-timestamp", timestamp1, + "--backup-dir", backupDir, + ) + + dateDeleted2 := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT date_deleted FROM backups WHERE timestamp = '%s'", timestamp2)) + Expect(dateDeleted2).ToNot(BeEmpty(), + fmt.Sprintf("Expected backup %s to be deleted", timestamp2)) + }) + + It("cleans plugin backups with --before-timestamp", func() { + 
copyPluginToAllHosts(backupConn, examplePluginExec) + + output := gpbackup(gpbackupPath, backupHelperPath, + "--plugin-config", examplePluginTestConfig) + timestamp := getBackupTimestamp(string(output)) + + gpbackman( + "backup-clean", + "--history-db", historyDB, + "--before-timestamp", timestamp, + "--plugin-config", examplePluginTestConfig, + ) + + countStr := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT count(*) FROM backups WHERE timestamp = '%s' AND date_deleted = ''", + timestamp)) + Expect(countStr).ToNot(BeEmpty()) + }) + + It("cleans local backups with --cascade for incremental chain", func() { + fullOutput := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir, + "--leaf-partition-data") + fullTimestamp := getBackupTimestamp(string(fullOutput)) + + _ = gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir, + "--incremental", + "--leaf-partition-data") + + gpbackman( + "backup-clean", + "--history-db", historyDB, + "--before-timestamp", fullTimestamp, + "--backup-dir", backupDir, + "--cascade", + ) + // Success if no error was thrown + }) + }) + + // ------------------------------------------------------------------ // + // history-clean + // ------------------------------------------------------------------ // + Describe("history-clean", func() { + var historyDB string + + BeforeEach(func() { + end_to_end_setup() + historyDB = getHistoryDBPathForCluster() + }) + + AfterEach(func() { + end_to_end_teardown() + }) + + It("cleans deleted backup entries from history DB", func() { + output := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp := getBackupTimestamp(string(output)) + + gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", timestamp, + "--backup-dir", backupDir, + ) + + dateDeleted := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT date_deleted FROM backups WHERE timestamp = '%s'", timestamp)) + Expect(dateDeleted).ToNot(BeEmpty()) + + // --before-timestamp 
uses strictly less than (<), so use a + // far-future cutoff to include the target timestamp. + gpbackman( + "history-clean", + "--history-db", historyDB, + "--before-timestamp", "99991231235959", + ) + + countStr := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT count(*) FROM backups WHERE timestamp = '%s'", timestamp)) + Expect(countStr).To(Equal("0"), + fmt.Sprintf("Expected backup %s to be removed from history DB", timestamp)) + }) + + It("cleans with --older-than-days 0", func() { + output := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp := getBackupTimestamp(string(output)) + + gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", timestamp, + "--backup-dir", backupDir, + ) + + gpbackman( + "history-clean", + "--history-db", historyDB, + "--older-than-days", "0", + ) + + countStr := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT count(*) FROM backups WHERE timestamp = '%s'", timestamp)) + Expect(countStr).To(Equal("0")) + }) + + It("leaves non-deleted entries intact", func() { + output1 := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp1 := getBackupTimestamp(string(output1)) + + output2 := gpbackup(gpbackupPath, backupHelperPath, + "--backup-dir", backupDir) + timestamp2 := getBackupTimestamp(string(output2)) + + gpbackman( + "backup-delete", + "--history-db", historyDB, + "--timestamp", timestamp1, + "--backup-dir", backupDir, + ) + + gpbackman( + "history-clean", + "--history-db", historyDB, + "--older-than-days", "0", + ) + + count1 := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT count(*) FROM backups WHERE timestamp = '%s'", timestamp1)) + Expect(count1).To(Equal("0"), + "Deleted backup should be removed from history") + + count2 := queryHistoryDB(historyDB, + fmt.Sprintf("SELECT count(*) FROM backups WHERE timestamp = '%s'", timestamp2)) + Expect(count2).To(Equal("1"), + "Non-deleted backup should remain in history") + }) + }) + + // 
------------------------------------------------------------------ // + // version & help + // ------------------------------------------------------------------ // + Describe("gpbackman --version", func() { + It("prints version information", func() { + output := gpbackman("--version") + Expect(string(output)).To(ContainSubstring("gpbackman")) + }) + }) + + Describe("gpbackman --help", func() { + It("prints help for all subcommands", func() { + for _, subcmd := range []string{ + "backup-info", "backup-delete", "backup-clean", + "history-clean", "report-info", + } { + output := gpbackman(subcmd, "--help") + Expect(string(output)).To(ContainSubstring(subcmd), + fmt.Sprintf("Help for %s should mention the command name", subcmd)) + } + }) + }) +}) diff --git a/gpbackman.go b/gpbackman.go new file mode 100644 index 00000000..e67fc0a3 --- /dev/null +++ b/gpbackman.go @@ -0,0 +1,10 @@ +//go:build gpbackman +// +build gpbackman + +package main + +import . "github.com/apache/cloudberry-backup/gpbackman/cmd" + +func main() { + Execute() +} diff --git a/gpbackman/COMMANDS.md b/gpbackman/COMMANDS.md new file mode 100644 index 00000000..031fb72f --- /dev/null +++ b/gpbackman/COMMANDS.md @@ -0,0 +1,558 @@ +- [Delete all existing backups older than the specified time condition (`backup-clean`)](#delete-all-existing-backups-older-than-the-specified-time-condition-backup-clean) + - [Examples](#examples) + - [Delete all backups from local storage older than the specified time condition](#delete-all-backups-from-local-storage-older-than-the-specified-time-condition) + - [Delete all backups using storage plugin older than n days](#delete-all-backups-using-storage-plugin-older-than-n-days) +- [Delete a specific existing backup (`backup-delete`)](#delete-a-specific-existing-backup-backup-delete) + - [Examples](#examples-1) + - [Delete existing backup from local storage](#delete-existing-backup-from-local-storage) + - [Delete existing backup using storage 
plugin](#delete-existing-backup-using-storage-plugin) +- [Display information about backups (`backup-info`)](#display-information-about-backups-backup-info) + - [Examples](#examples-2) +- [Clean deleted backups from the history database (`history-clean`)](#clean-deleted-backups-from-the-history-database-history-clean) + - [Examples](#examples-3) + - [Delete information about deleted backups from history database older than n days](#delete-information-about-deleted-backups-from-history-database-older-than-n-days) + - [Delete information about deleted backups from history database older than timestamp](#delete-information-about-deleted-backups-from-history-database-older-than-timestamp) +- [Display the report for a specific backup (`report-info`)](#display-the-report-for-a-specific-backup-report-info) + - [Examples](#examples-4) + - [Display the backup report from local storage](#display-the-backup-report-from-local-storage) + - [Display the backup report using storage plugin](#display-the-backup-report-using-storage-plugin) + +# Delete all existing backups older than the specified time condition (`backup-clean`) + +Available options for `backup-clean` command and their description: +```bash +./gpbackman backup-clean -h +Delete all existing backups older than the specified time condition. + +To delete backup sets older than the given timestamp, use the --before-timestamp option. +To delete backup sets older than the given number of days, use the --older-than-days option. +To delete backup sets newer than the given timestamp, use the --after-timestamp option. +Only --older-than-days, --before-timestamp or --after-timestamp option must be specified. + +By default, the existence of dependent backups is checked and deletion process is not performed, +unless the --cascade option is passed in. + +By default, the deletion will be performed for local backup. + +The full path to the backup directory can be set using the --backup-dir option.
+ +For local backups the following logic are applied: + * If the --backup-dir option is specified, the deletion will be performed in provided path. + * If the --backup-dir option is not specified, but the backup was made with --backup-dir flag for gpbackup, the deletion will be performed in the backup manifest path. + * If the --backup-dir option is not specified and backup directory is not specified in backup manifest, the deletion will be performed in backup folder in the master and segments data directories. + * If backup is not local, the error will be returned. + +For control over the number of parallel processes and ssh connections to delete local backups, the --parallel-processes option can be used. + +The storage plugin config file location can be set using the --plugin-config option. +The full path to the file is required. In this case, the deletion will be performed using the storage plugin. + +For non local backups the following logic are applied: + * If the --plugin-config option is specified, the deletion will be performed using the storage plugin. + * If backup is local, the error will be returned. + +The gpbackup_history.db file location can be set using the --history-db option. +Can be specified only once. The full path to the file is required. +If the --history-db option is not specified, the history database will be searched in the current directory. 
+ +Usage: + gpbackman backup-clean [flags] + +Flags: + --after-timestamp string delete backup sets newer than the given timestamp + --backup-dir string the full path to backup directory for local backups + --before-timestamp string delete backup sets older than the given timestamp + --cascade delete all dependent backups + -h, --help help for backup-clean + --older-than-days uint delete backup sets older than the given number of days + --parallel-processes int the number of parallel processes to delete local backups (default 1) + --plugin-config string the full path to plugin config file + +Global Flags: + --history-db string full path to the gpbackup_history.db file + --log-file string full path to log file directory, if not specified, the log file will be created in the $HOME/gpAdminLogs directory + --log-level-console string level for console logging (error, info, debug, verbose) (default "info") + --log-level-file string level for file logging (error, info, debug, verbose) (default "info") +``` + +## Examples +### Delete all backups from local storage older than the specified time condition + +Delete specific backup : +```bash +./gpbackman backup-clean \ + --before-timestamp 20240701100000 \ + --cascade +``` + +Delete specific backup with specifying the number of parallel processes: +```bash +./gpbackman backup-delete \ + --older-than-days 7 \ + --parallel-processes 5 +``` + +### Delete all backups using storage plugin older than n days +Delete all backups older than 7 days and all dependent backups: +```bash +./gpbackman backup-clean \ + --older-than-days 7 \ + --plugin-config /tmp/gpbackup_plugin_config.yaml \ + --cascade +``` + +# Delete a specific existing backup (`backup-delete`) + +Available options for `backup-delete` command and their description: + +```bash +./gpbackman backup-delete -h +Delete a specific existing backup. + +The --timestamp option must be specified. It could be specified multiple times. 
+ +By default, the existence of dependent backups is checked and deletion process is not performed, +unless the --cascade option is passed in. + +If backup already deleted, the deletion process is skipped, unless --force option is specified. +If errors occur during the deletion process, the errors can be ignored using the --ignore-errors option. +The --ignore-errors option can be used only with --force option. + +By default, the deletion will be performed for local backup. + +The full path to the backup directory can be set using the --backup-dir option. + +For local backups the following logic are applied: + * If the --backup-dir option is specified, the deletion will be performed in provided path. + * If the --backup-dir option is not specified, but the backup was made with --backup-dir flag for gpbackup, the deletion will be performed in the backup manifest path. + * If the --backup-dir option is not specified and backup directory is not specified in backup manifest, the deletion will be performed in backup folder in the master and segments data directories. + * If backup is not local, the error will be returned. + +For control over the number of parallel processes and ssh connections to delete local backups, the --parallel-processes option can be used. + +The storage plugin config file location can be set using the --plugin-config option. +The full path to the file is required. In this case, the deletion will be performed using the storage plugin. + +For non local backups the following logic are applied: + * If the --plugin-config option is specified, the deletion will be performed using the storage plugin. + * If backup is local, the error will be returned. + +The gpbackup_history.db file location can be set using the --history-db option. +Can be specified only once. The full path to the file is required. +If the --history-db option is not specified, the history database will be searched in the current directory. 
+ +Usage: + gpbackman backup-delete [flags] + +Flags: + --backup-dir string the full path to backup directory for local backups + --cascade delete all dependent backups for the specified backup timestamp + --force try to delete, even if the backup already mark as deleted + -h, --help help for backup-delete + --ignore-errors ignore errors when deleting backups + --parallel-processes int the number of parallel processes to delete local backups (default 1) + --plugin-config string the full path to plugin config file + --timestamp stringArray the backup timestamp for deleting, could be specified multiple times + +Global Flags: + --history-db string full path to the gpbackup_history.db file + --log-file string full path to log file directory, if not specified, the log file will be created in the $HOME/gpAdminLogs directory + --log-level-console string level for console logging (error, info, debug, verbose) (default "info") + --log-level-file string level for file logging (error, info, debug, verbose) (default "info") +``` + +## Examples +### Delete existing backup from local storage +Delete specific backup with specifying directory path: +```bash +./gpbackman backup-delete \ + --timestamp 20230809232817 \ + --backup-dir /some/path +``` + +Delete specific backup with specifying the number of parallel processes: +```bash +./gpbackman backup-delete \ + --timestamp 20230809212220 \ + --parallel-processes 5 +``` + +### Delete existing backup using storage plugin +Delete specific backup: +```bash +./gpbackman backup-delete \ + --timestamp 20230725101959 \ + --plugin-config /tmp/gpbackup_plugin_config.yaml +``` + +Delete specific backup and all dependent backups: +```bash +./gpbackman backup-delete \ + --timestamp 20230725101115 \ + --plugin-config /tmp/gpbackup_plugin_config.yaml \ + --cascade +``` + +# Display information about backups (`backup-info`) + +Available options for `backup-info` command and their description: + +```bash +./gpbackman backup-info -h +Display 
information about backups. + +By default, only active backups or backups with deletion status "In progress" from gpbackup_history.db are displayed. + +To display deleted backups, use the --deleted option. +To display failed backups, use the --failed option. +To display all backups, use --deleted and --failed options together. + +To display backups of a specific type, use the --type option. + +To display backups that include the specified table, use the --table option. +The formatting rules for . match those of the --include-table option in gpbackup. + +To display backups that include the specified schema, use the --schema option. +The formatting rules for match those of the --include-schema option in gpbackup. + +To display backups that exclude the specified table, use the --table and --exclude options. +The formatting rules for .
match those of the --exclude-table option in gpbackup. + +To display backups that exclude the specified schema, use the --schema and --exclude options. +The formatting rules for match those of the --exclude-schema option in gpbackup. + +To display details about object filtering, use the --detail option. +The details are presented as follows, depending on the active filtering type: + * include-table / exclude-table: a comma-separated list of fully-qualified table names in the format .
; + * include-schema / exclude-schema: a comma-separated list of schema names; + * if no object filtering was used, the value is empty. + +To display a backup chain for a specific backup, use the --timestamp option. +In this mode, the backup with the specified timestamp and all of its dependent backups will be displayed. +The deleted and failed backups are always included in this mode. +To display object filtering details in this mode, use the --detail option. +When --timestamp is set, the following options cannot be used: --type, --table, --schema, --exclude, --failed, --deleted. + +To display the "object filtering details" column for all backups without using --timestamp, use the --detail option. + +The gpbackup_history.db file location can be set using the --history-db option. +Can be specified only once. The full path to the file is required. +If the --history-db option is not specified, the history database will be searched in the current directory. + +Usage: + gpbackman backup-info [flags] + +Flags: + --deleted show deleted backups + --detail show object filtering details + --exclude show backups that exclude the specific table (format .
) or schema + --failed show failed backups + -h, --help help for backup-info + --schema string show backups that include the specified schema + --table string show backups that include the specified table (format .
) + --timestamp string show backup info and its dependent backups for the specified timestamp + --type string backup type filter (full, incremental, data-only, metadata-only) + +Global Flags: + --history-db string full path to the gpbackup_history.db file + --log-file string full path to log file directory, if not specified, the log file will be created in the $HOME/gpAdminLogs directory + --log-level-console string level for console logging (error, info, debug, verbose) (default "info") + --log-level-file string level for file logging (error, info, debug, verbose) (default "info") +``` + +The following information is provided about each backup: +* `TIMESTAMP` - backup name, timestamp (`YYYYMMDDHHMMSS`) when the backup was taken; +* `DATE`- date in format `Mon Jan 02 2006 15:04:05` when the backup was taken; +* `STATUS`- backup status: `Success` or `Failure`; +* `DATABASE` - database name for which the backup was performed (specified by `--dbname` option on the `gpbackup` command). +* `TYPE` - backup type: + - `full` - contains user data, all global and local metadata for the database; + - `incremental` – contains user data, all global and local metadata changed since a previous full backup; + - `metadata-only` – contains only global and local metadata for the database; + - `data-only` – contains only user data from the database. + +* `OBJECT FILTERING` - whether the object filtering options were used when executing the `gpbackup` command: + - `include-schema` – at least one `--include-schema` option was specified; + - `exclude-schema` – at least one `--exclude-schema` option was specified; + - `include-table` – at least one `--include-table` option was specified; + - `exclude-table` – at least one `--exclude-table` option was specified; + - `""` - no options was specified. 
+ +* `PLUGIN` - plugin name that was used to configure the backup destination; +* `DURATION` - backup duration in the format `hh:mm:ss`; +* `DATE DELETED` - backup deletion status: + - `In progress` - the deletion is in progress; + - `Plugin Backup Delete Failed` - last delete attempt failed to delete backup from plugin storage; + - `Local Delete Failed` - last delete attempt failed to delete backup from local storage.; + - `""` - if backup is active; + - date in format `Mon Jan 02 2006 15:04:05` - if backup is deleted and deletion timestamp is set. + +If the `--detail` option is specified, the following additional information is provided: +* `OBJECT FILTERING DETAILS` - details about object filtering: + - if `include-table` or `exclude-table` filtering was used, a comma-separated list of fully-qualified table names in the format `.
`; + - if `include-schema` or `exclude-schema` filtering was used, a comma-separated list of schema names; + - if no object filtering was used, the value is empty. + +If gpbackup is launched without specifying `--metadata-only` flag, but there were no tables that contain data for backup, then gpbackup will only perform a `metadata-only` backup. The logs will contain messages like `No tables in backup set contain data. Performing metadata-only backup instead.` As a result, gpBackMan will display such backups as `metadata-only`. + +## Examples + +Display info for active backups from `gpbackup_history.db`: +```bash +./gpbackman backup-info + + TIMESTAMP | DATE | STATUS | DATABASE | TYPE | OBJECT FILTERING | PLUGIN | DURATION | DATE DELETED +----------------+--------------------------+---------+----------+---------------+------------------+--------------------+----------+----------------------------- + 20230809232817 | Wed Aug 09 2023 23:28:17 | Success | demo | full | | | 04:00:03 | + 20230725110051 | Tue Jul 25 2023 11:00:51 | Success | demo | incremental | | gpbackup_s3_plugin | 00:00:20 | + 20230725102950 | Tue Jul 25 2023 10:29:50 | Success | demo | incremental | | gpbackup_s3_plugin | 00:00:19 | + 20230725102831 | Tue Jul 25 2023 10:28:31 | Success | demo | incremental | | gpbackup_s3_plugin | 00:00:18 | + 20230725101959 | Tue Jul 25 2023 10:19:59 | Success | demo | incremental | | gpbackup_s3_plugin | 00:00:22 | + 20230725101152 | Tue Jul 25 2023 10:11:52 | Success | demo | incremental | | gpbackup_s3_plugin | 00:00:18 | + 20230725101115 | Tue Jul 25 2023 10:11:15 | Success | demo | full | | gpbackup_s3_plugin | 00:00:20 | + 20230724090000 | Mon Jul 24 2023 09:00:00 | Success | demo | metadata-only | | gpbackup_s3_plugin | 00:05:17 | + 20230723082000 | Sun Jul 23 2023 08:20:00 | Success | demo | data-only | | gpbackup_s3_plugin | 00:35:17 | + 20230722100000 | Sat Jul 22 2023 10:00:00 | Success | demo | full | | gpbackup_s3_plugin | 00:25:17 | + 20230721090000 | 
Fri Jul 21 2023 09:00:00 | Success | demo | metadata-only | | gpbackup_s3_plugin | 00:04:17 | + 20230625110310 | Sun Jun 25 2023 11:03:10 | Success | demo | incremental | include-table | gpbackup_s3_plugin | 00:40:18 | Plugin Backup Delete Failed + 20230624101152 | Sat Jun 24 2023 10:11:52 | Success | demo | incremental | include-table | gpbackup_s3_plugin | 00:30:00 | + 20230623101115 | Fri Jun 23 2023 10:11:15 | Success | demo | full | include-table | gpbackup_s3_plugin | 01:01:00 | + 20230524101152 | Wed May 24 2023 10:11:52 | Success | demo | incremental | include-schema | gpbackup_s3_plugin | 00:30:00 | + 20230523101115 | Tue May 23 2023 10:11:15 | Success | demo | full | include-schema | gpbackup_s3_plugin | 01:01:00 | + ``` + +Display info for active full backups from `gpbackup_history.db`: +```bash +./gpbackman backup-info \ + --type full + + TIMESTAMP | DATE | STATUS | DATABASE | TYPE | OBJECT FILTERING | PLUGIN | DURATION | DATE DELETED +----------------+--------------------------+---------+----------+------+------------------+--------------------+----------+-------------- + 20230809232817 | Wed Aug 09 2023 23:28:17 | Success | demo | full | | | 04:00:03 | + 20230725101115 | Tue Jul 25 2023 10:11:15 | Success | demo | full | | gpbackup_s3_plugin | 00:00:20 | + 20230722100000 | Sat Jul 22 2023 10:00:00 | Success | demo | full | | gpbackup_s3_plugin | 00:25:17 | + 20230623101115 | Fri Jun 23 2023 10:11:15 | Success | demo | full | include-table | gpbackup_s3_plugin | 01:01:00 | + 20230523101115 | Tue May 23 2023 10:11:15 | Success | demo | full | include-schema | gpbackup_s3_plugin | 01:01:00 | +``` + +Find all backups, including deleted ones, containing the `test1` schema. 
+```bash +./gpbackman backup-info \ + --deleted \ + --schema test1 + + TIMESTAMP | DATE | STATUS | DATABASE | TYPE | OBJECT FILTERING | PLUGIN | DURATION | DATE DELETED +----------------+--------------------------+---------+----------+-------------+------------------+--------------------+----------+-------------------------- + 20230525101152 | Thu May 25 2023 10:11:52 | Success | demo | incremental | include-schema | gpbackup_s3_plugin | 00:30:00 | Sun Jun 25 2023 10:11:52 + 20230524101152 | Wed May 24 2023 10:11:52 | Success | demo | incremental | include-schema | gpbackup_s3_plugin | 00:30:00 | + 20230523101115 | Tue May 23 2023 10:11:15 | Success | demo | full | include-schema | gpbackup_s3_plugin | 01:01:00 | + ``` + +Display info for all backups, including deleted and failed ones, from `gpbackup_history.db`: +```bash +./gpbackman backup-info \ + --deleted \ + --failed \ + --history-db /data/master/gpseg-1/gpbackup_history.db + + TIMESTAMP | DATE | STATUS | DATABASE | TYPE | OBJECT FILTERING | PLUGIN | DURATION | DATE DELETED +----------------+--------------------------+---------+----------+---------------+------------------+--------------------+----------+----------------------------- + 20230809232817 | Wed Aug 09 2023 23:28:17 | Success | demo | full | | | 04:00:03 | + 20230806230400 | Sun Aug 06 2023 23:04:00 | Failure | demo | full | | gpbackup_s3_plugin | 00:00:38 | + 20230725110310 | Tue Jul 25 2023 11:03:10 | Success | demo | incremental | | gpbackup_s3_plugin | 00:00:18 | Wed Jul 26 2023 11:03:28 + 20230725110051 | Tue Jul 25 2023 11:00:51 | Success | demo | incremental | | gpbackup_s3_plugin | 00:00:20 | + 20230725102950 | Tue Jul 25 2023 10:29:50 | Success | demo | incremental | | gpbackup_s3_plugin | 00:00:19 | + 20230725102831 | Tue Jul 25 2023 10:28:31 | Success | demo | incremental | | gpbackup_s3_plugin | 00:00:18 | + 20230725101959 | Tue Jul 25 2023 10:19:59 | Success | demo | incremental | | gpbackup_s3_plugin | 00:00:22 | + 20230725101152 | 
Tue Jul 25 2023 10:11:52 | Success | demo | incremental | | gpbackup_s3_plugin | 00:00:18 | + 20230725101115 | Tue Jul 25 2023 10:11:15 | Success | demo | full | | gpbackup_s3_plugin | 00:00:20 | + 20230724090000 | Mon Jul 24 2023 09:00:00 | Success | demo | metadata-only | | gpbackup_s3_plugin | 00:05:17 | + 20230723082000 | Sun Jul 23 2023 08:20:00 | Success | demo | data-only | | gpbackup_s3_plugin | 00:35:17 | + 20230722100000 | Sat Jul 22 2023 10:00:00 | Success | demo | full | | gpbackup_s3_plugin | 00:25:17 | + 20230721090000 | Fri Jul 21 2023 09:00:00 | Success | demo | metadata-only | | gpbackup_s3_plugin | 00:04:17 | + 20230706230400 | Thu Jul 06 2023 23:04:00 | Failure | demo | full | | gpbackup_s3_plugin | 00:00:38 | + 20230625110310 | Sun Jun 25 2023 11:03:10 | Success | demo | incremental | include-table | gpbackup_s3_plugin | 00:40:18 | Plugin Backup Delete Failed + 20230624101152 | Sat Jun 24 2023 10:11:52 | Success | demo | incremental | include-table | gpbackup_s3_plugin | 00:30:00 | + 20230623101115 | Fri Jun 23 2023 10:11:15 | Success | demo | full | include-table | gpbackup_s3_plugin | 01:01:00 | + 20230606230400 | Tue Jun 06 2023 23:04:00 | Failure | demo | full | | gpbackup_s3_plugin | 00:00:38 | + 20230525101152 | Thu May 25 2023 10:11:52 | Success | demo | incremental | include-schema | gpbackup_s3_plugin | 00:30:00 | Sun Jun 25 2023 10:11:52 + 20230524101152 | Wed May 24 2023 10:11:52 | Success | demo | incremental | include-schema | gpbackup_s3_plugin | 00:30:00 | + 20230523101115 | Tue May 23 2023 10:11:15 | Success | demo | full | include-schema | gpbackup_s3_plugin | 01:01:00 | + ``` + +Display full backup with object filtering details: +```bash +./gpbackman backup-info \ + --type full \ + --detail + + TIMESTAMP | DATE | STATUS | DATABASE | TYPE | OBJECT FILTERING | PLUGIN | DURATION | DATE DELETED | OBJECT FILTERING DETAILS 
+----------------+--------------------------+---------+----------+------+------------------+--------------------+----------+--------------+-------------------------- + 20250915221743 | Mon Sep 15 2025 22:17:43 | Success | demo | full | | | 00:00:01 | | + 20250915221643 | Mon Sep 15 2025 22:16:43 | Success | demo | full | exclude-schema | gpbackup_s3_plugin | 00:00:01 | | sch1 + 20250915221631 | Mon Sep 15 2025 22:16:31 | Success | demo | full | include-table | gpbackup_s3_plugin | 00:00:01 | | sch2.tbl_c, sch2.tbl_d + 20250915221616 | Mon Sep 15 2025 22:16:16 | Success | demo | full | | gpbackup_s3_plugin | 00:00:05 | | + 20250915221553 | Mon Sep 15 2025 22:15:53 | Success | demo | full | exclude-table | | 00:00:02 | | sch1.tbl_b + 20250915221542 | Mon Sep 15 2025 22:15:42 | Success | demo | full | include-table | | 00:00:01 | | sch1.tbl_a + 20250915221531 | Mon Sep 15 2025 22:15:31 | Success | demo | full | | | 00:00:01 | | + +``` + +Display info for the backup chain for a specific backup. 
In this example, the backup with timestamp `20250913210921` is a full backup, and all its dependent incremental backups are displayed as well: +```bash +./gpbackman backup-info \ + --timestamp 20250913210921 \ + --detail + + TIMESTAMP | DATE | STATUS | DATABASE | TYPE | OBJECT FILTERING | PLUGIN | DURATION | DATE DELETED | OBJECT FILTERING DETAILS +----------------+--------------------------+---------+----------+-------------+------------------+--------------------+----------+--------------------------+-------------------------- + 20250915201446 | Mon Sep 15 2025 20:14:46 | Success | demo | incremental | include-table | gpbackup_s3_plugin | 00:00:02 | | sch2.tbl_c + 20250915201439 | Mon Sep 15 2025 20:14:39 | Success | demo | incremental | include-table | gpbackup_s3_plugin | 00:00:01 | | sch2.tbl_c + 20250915201307 | Mon Sep 15 2025 20:13:07 | Success | demo | incremental | include-table | gpbackup_s3_plugin | 00:00:02 | Mon Sep 15 2025 20:17:56 | sch2.tbl_c + 20250915200929 | Mon Sep 15 2025 20:09:29 | Success | demo | incremental | include-table | gpbackup_s3_plugin | 00:00:01 | | sch2.tbl_c + 20250913210957 | Sat Sep 13 2025 21:09:57 | Success | demo | incremental | include-table | gpbackup_s3_plugin | 00:00:01 | | sch2.tbl_c + 20250913210921 | Sat Sep 13 2025 21:09:21 | Success | demo | full | include-table | gpbackup_s3_plugin | 00:00:02 | | sch2.tbl_c +``` + +When using the option `--detail`, the column `OBJECT FILTERING DETAILS` may contain a large output. For pretty display, you can use `less -XS`: +```bash +./gpbackman backup-info --detail | less -XS +``` + +# Clean deleted backups from the history database (`history-clean`) + +Available options for `history-clean` command and their description: + +```bash +./gpbackman history-clean -h +Clean deleted backups from the history database. +Only the database is being cleaned up. + +Information is deleted only about deleted backups from gpbackup_history.db. Each backup must be deleted first. 
+ +To delete information about backups older than the given timestamp, use the --before-timestamp option. +To delete information about backups older than the given number of days, use the --older-than-day option. +Only --older-than-days or --before-timestamp option must be specified, not both. + +The gpbackup_history.db file location can be set using the --history-db option. +Can be specified only once. The full path to the file is required. +If the --history-db option is not specified, the history database will be searched in the current directory. + +Usage: + gpbackman history-clean [flags] + +Flags: + --before-timestamp string delete information about backups older than the given timestamp + -h, --help help for history-clean + --older-than-days uint delete information about backups older than the given number of days + +Global Flags: + --history-db string full path to the gpbackup_history.db file + --log-file string full path to log file directory, if not specified, the log file will be created in the $HOME/gpAdminLogs directory + --log-level-console string level for console logging (error, info, debug, verbose) (default "info") + --log-level-file string level for file logging (error, info, debug, verbose) (default "info") +``` + +## Examples +### Delete information about deleted backups from history database older than n days +Delete information about deleted backups from history database older than 7 days: +```bash +./gpbackman history-clean \ + --older-than-days 7 \ +``` + +### Delete information about deleted backups from history database older than timestamp +Delete information about deleted backups from history database older than timestamp `20240101100000`: +```bash +./gpbackman history-clean \ + --before-timestamp 20240101100000 \ +``` + +# Display the report for a specific backup (`report-info`) + +Available options for `report-info` command and their description: + +```bash +./gpbackman.go report-info -h +Display the report for a specific backup. 
+ +The --timestamp option must be specified. + +The report could be displayed only for active backups. + +The full path to the backup directory can be set using the --backup-dir option. +The full path to the data directory is required. + +For local backups the following logic are applied: + * If the --backup-dir option is specified, the report will be searched in provided path. + * If the --backup-dir option is not specified, but the backup was made with --backup-dir flag for gpbackup, the report will be searched in provided path from backup manifest. + * If the --backup-dir option is not specified and backup directory is not specified in backup manifest, the utility try to connect to local cluster and get master data directory. + If this information is available, the report will be in master data directory. + * If backup is not local, the error will be returned. + +The storage plugin config file location can be set using the --plugin-config option. +The full path to the file is required. + +For non local backups the following logic are applied: + * If the --plugin-config option is specified, the report will be searched in provided location. + * If backup is local, the error will be returned. + +Only --backup-dir or --plugin-config option can be specified, not both. + +If a custom plugin is used, it is required to specify the path to the directory with the repo file using the --plugin-report-file-path option. +It is not necessary to use the --plugin-report-file-path flag for the following plugins (the path is generated automatically): + * gpbackup_s3_plugin. + +The gpbackup_history.db file location can be set using the --history-db option. +Can be specified only once. The full path to the file is required. +If the --history-db option is not specified, the history database will be searched in the current directory. 
+ +Usage: + gpbackman report-info [flags] + +Flags: + --backup-dir string the full path to backup directory + -h, --help help for report-info + --plugin-config string the full path to plugin config file + --plugin-report-file-path string the full path to plugin report file + --timestamp string the backup timestamp for report displaying + +Global Flags: + --history-db string full path to the gpbackup_history.db file + --log-file string full path to log file directory, if not specified, the log file will be created in the $HOME/gpAdminLogs directory + --log-level-console string level for console logging (error, info, debug, verbose) (default "info") + --log-level-file string level for file logging (error, info, debug, verbose) (default "info") +``` + +## Examples +### Display the backup report from local storage + +With specifying backup directory path: +```bash +./gpbackman report-info \ + --timestamp 20230809232817 \ + --backup-dir /some/path +``` + +With specifying backup directory path: +```bash +./gpbackman report-info \ + --timestamp 20230809232817 \ +``` + +### Display the backup report using storage plugin + +For `gpbackup_s3_plugin`: +```bash +./gpbackman report-info \ + --timestamp 20230725101959 \ + --plugin-config /tmp/gpbackup_plugin_config.yaml +``` + +For other plugins: +```bash +./gpbackman report-infodoc \ + --timestamp 20230725101959 \ + --plugin-config /tmp/gpbackup_plugin_config.yaml \ + --plugin-report-file-path /some/path/to/report +``` diff --git a/gpbackman/README.md b/gpbackman/README.md new file mode 100644 index 00000000..9c6fb72c --- /dev/null +++ b/gpbackman/README.md @@ -0,0 +1,58 @@ +# gpBackMan + +**gpBackMan** is designed to manage backups created by gpbackup. + +The utility works with `gpbackup_history.db` SQLite history database format. 
+ +**gpBackMan** provides the following features: +* display information about backups; +* display the backup report for existing backups; +* delete existing backups from local storage or using storage plugins; +* delete all existing backups from local storage or using storage plugins older than the specified time condition; +* clean deleted backups from the history database; + +## Commands +### Introduction + +Available commands and global options: + +```bash +./gpbackman --help +gpBackMan - utility for managing backups created by gpbackup + +Usage: + gpbackman [command] + +Available Commands: + backup-clean Delete all existing backups older than the specified time condition + backup-delete Delete a specific existing backup + backup-info Display information about backups + completion Generate the autocompletion script for the specified shell + help Help about any command + history-clean Clean deleted backups from the history database + report-info Display the report for a specific backup + +Flags: + -h, --help help for gpbackman + --history-db string full path to the gpbackup_history.db file + --log-file string full path to log file directory, if not specified, the log file will be created in the $HOME/gpAdminLogs directory + --log-level-console string level for console logging (error, info, debug, verbose) (default "info") + --log-level-file string level for file logging (error, info, debug, verbose) (default "info") + -v, --version version for gpbackman + +Use "gpbackman [command] --help" for more information about a command. 
+``` + +### Detail info about commands + +Description of each command: +* [Delete all existing backups older than the specified time condition (`backup-clean`)](./COMMANDS.md#delete-all-existing-backups-older-than-the-specified-time-condition-backup-clean) +* [Delete a specific existing backup (`backup-delete`)](./COMMANDS.md#delete-a-specific-existing-backup-backup-delete) +* [Display information about backups (`backup-info`)](./COMMANDS.md#display-information-about-backups-backup-info) +* [Clean deleted backups from the history database (`history-clean`)](./COMMANDS.md#clean-deleted-backups-from-the-history-database-history-clean) +* [Display the report for a specific backup (`report-info`)](./COMMANDS.md#display-the-report-for-a-specific-backup-report-info) + +## About + +gpBackMan is part of the Apache Cloudberry Backup (Incubating) toolset. It is based on the original [gpbackman](https://github.com/woblerr/gpbackman) project. + diff --git a/gpbackman/cmd/backup_clean.go b/gpbackman/cmd/backup_clean.go new file mode 100644 index 00000000..eb46ddba --- /dev/null +++ b/gpbackman/cmd/backup_clean.go @@ -0,0 +1,275 @@ +package cmd + +import ( + "database/sql" + "strconv" + + "github.com/apache/cloudberry-go-libs/gplog" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/apache/cloudberry-backup/gpbackman/gpbckpconfig" + "github.com/apache/cloudberry-backup/gpbackman/textmsg" + "github.com/apache/cloudberry-backup/utils" +) + +// Flags for the gpbackman backup-clean command (backupCleanCmd) +var ( + backupCleanBeforeTimestamp string + backupCleanAfterTimestamp string + backupCleanPluginConfigFile string + backupCleanBackupDir string + backupCleanOlderThanDays uint + backupCleanParallelProcesses int + backupCleanCascade bool +) + +var backupCleanCmd = &cobra.Command{ + Use: "backup-clean", + Short: "Delete all existing backups older than the specified time condition", + Long: `Delete all existing backups older than the specified time condition. 
+ +To delete backup sets older than the given timestamp, use the --before-timestamp option. +To delete backup sets older than the given number of days, use the --older-than-days option. +To delete backup sets newer than the given timestamp, use the --after-timestamp option. +Only --older-than-days, --before-timestamp or --after-timestamp option must be specified. + +By default, the existence of dependent backups is checked and deletion process is not performed, +unless the --cascade option is passed in. + +By default, the deletion will be performed for local backup. + +The full path to the backup directory can be set using the --backup-dir option. + +For local backups the following logic is applied: + * If the --backup-dir option is specified, the deletion will be performed in provided path. + * If the --backup-dir option is not specified, but the backup was made with --backup-dir flag for gpbackup, the deletion will be performed in the backup manifest path. + * If the --backup-dir option is not specified and backup directory is not specified in backup manifest, the deletion will be performed in backup folder in the master and segments data directories. + * If backup is not local, the error will be returned. + +For control over the number of parallel processes and ssh connections to delete local backups, the --parallel-processes option can be used. + +The storage plugin config file location can be set using the --plugin-config option. +The full path to the file is required. In this case, the deletion will be performed using the storage plugin. + +For non local backups the following logic is applied: + * If the --plugin-config option is specified, the deletion will be performed using the storage plugin. + * If backup is local, the error will be returned. + +The gpbackup_history.db file location can be set using the --history-db option. +Can be specified only once. The full path to the file is required. 
+If the --history-db option is not specified, the history database will be searched in the current directory.`, + Args: cobra.NoArgs, + Run: func(cmd *cobra.Command, args []string) { + doRootFlagValidation(cmd.Flags(), checkFileExistsConst) + doCleanBackupFlagValidation(cmd.Flags()) + doCleanBackup() + }, +} + +func init() { + rootCmd.AddCommand(backupCleanCmd) + backupCleanCmd.PersistentFlags().StringVar( + &backupCleanPluginConfigFile, + pluginConfigFileFlagName, + "", + "the full path to plugin config file", + ) + backupCleanCmd.PersistentFlags().BoolVar( + &backupCleanCascade, + cascadeFlagName, + false, + "delete all dependent backups", + ) + backupCleanCmd.PersistentFlags().UintVar( + &backupCleanOlderThanDays, + olderThanDaysFlagName, + 0, + "delete backup sets older than the given number of days", + ) + backupCleanCmd.PersistentFlags().StringVar( + &backupCleanBeforeTimestamp, + beforeTimestampFlagName, + "", + "delete backup sets older than the given timestamp", + ) + backupCleanCmd.PersistentFlags().StringVar( + &backupCleanAfterTimestamp, + afterTimestampFlagName, + "", + "delete backup sets newer than the given timestamp", + ) + backupCleanCmd.PersistentFlags().StringVar( + &backupCleanBackupDir, + backupDirFlagName, + "", + "the full path to backup directory for local backups", + ) + backupCleanCmd.PersistentFlags().IntVar( + &backupCleanParallelProcesses, + parallelProcessesFlagName, + 1, + "the number of parallel processes to delete local backups", + ) + backupCleanCmd.MarkFlagsMutuallyExclusive(beforeTimestampFlagName, olderThanDaysFlagName, afterTimestampFlagName) +} + +// These flag checks are applied only for backup-clean command. +func doCleanBackupFlagValidation(flags *pflag.FlagSet) { + var err error + // If before-timestamp flag is specified and have correct values. 
+ if flags.Changed(beforeTimestampFlagName) { + err = gpbckpconfig.CheckTimestamp(backupCleanBeforeTimestamp) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(backupCleanBeforeTimestamp, beforeTimestampFlagName, err)) + execOSExit(exitErrorCode) + } + beforeTimestamp = backupCleanBeforeTimestamp + } + if flags.Changed(olderThanDaysFlagName) { + beforeTimestamp = gpbckpconfig.GetTimestampOlderThan(backupCleanOlderThanDays) + } + // If after-timestamp flag is specified and has correct values. + if flags.Changed(afterTimestampFlagName) { + err = gpbckpconfig.CheckTimestamp(backupCleanAfterTimestamp) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(backupCleanAfterTimestamp, afterTimestampFlagName, err)) + execOSExit(exitErrorCode) + } + afterTimestamp = backupCleanAfterTimestamp + } + // backup-dir and plugin-config flags cannot be used together. + err = checkCompatibleFlags(flags, backupDirFlagName, pluginConfigFileFlagName) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableCompatibleFlags(err, backupDirFlagName, pluginConfigFileFlagName)) + execOSExit(exitErrorCode) + } + // If parallel-processes flag is specified and has correct values. + if flags.Changed(parallelProcessesFlagName) && !gpbckpconfig.IsPositiveValue(backupCleanParallelProcesses) { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(strconv.Itoa(backupCleanParallelProcesses), parallelProcessesFlagName, err)) + execOSExit(exitErrorCode) + } + // plugin-config and parallel-processes flags cannot be used together. + err = checkCompatibleFlags(flags, parallelProcessesFlagName, pluginConfigFileFlagName) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableCompatibleFlags(err, parallelProcessesFlagName, pluginConfigFileFlagName)) + execOSExit(exitErrorCode) + } + // If backup-dir flag is specified and it exists and the full path is specified. 
+ if flags.Changed(backupDirFlagName) { + err = gpbckpconfig.CheckFullPath(backupCleanBackupDir, checkFileExistsConst) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(backupCleanBackupDir, backupDirFlagName, err)) + execOSExit(exitErrorCode) + } + } + // If plugin-config flag is specified and it exists and the full path is specified. + if flags.Changed(pluginConfigFileFlagName) { + err = gpbckpconfig.CheckFullPath(backupCleanPluginConfigFile, checkFileExistsConst) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(backupCleanPluginConfigFile, pluginConfigFileFlagName, err)) + execOSExit(exitErrorCode) + } + } + if beforeTimestamp == "" && afterTimestamp == "" { + gplog.Error("%s", textmsg.ErrorTextUnableValidateValue(textmsg.ErrorValidationValue(), olderThanDaysFlagName, beforeTimestampFlagName, afterTimestampFlagName)) + execOSExit(exitErrorCode) + } +} + +func doCleanBackup() { + logHeadersDebug() + err := cleanBackup() + if err != nil { + execOSExit(exitErrorCode) + } +} + +func cleanBackup() error { + hDB, err := gpbckpconfig.OpenHistoryDB(getHistoryDBPath(rootHistoryDB)) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableActionHistoryDB("open", err)) + return err + } + defer func() { + closeErr := hDB.Close() + if closeErr != nil { + gplog.Error("%s", textmsg.ErrorTextUnableActionHistoryDB("close", closeErr)) + } + }() + if backupCleanPluginConfigFile != "" { + pluginConfig, err := utils.ReadPluginConfig(backupCleanPluginConfigFile) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableReadPluginConfigFile(err)) + return err + } + err = backupCleanDBPlugin(backupCleanCascade, beforeTimestamp, afterTimestamp, backupCleanPluginConfigFile, pluginConfig, hDB) + if err != nil { + return err + } + } else { + err := backupCleanDBLocal(backupCleanCascade, beforeTimestamp, afterTimestamp, backupCleanBackupDir, backupCleanParallelProcesses, hDB) + if err != nil { + return err + } + } + return nil +} + 
+func backupCleanDBPlugin(deleteCascade bool, cutOffTimestamp, cutOffAfterTimestamp, pluginConfigPath string, pluginConfig *utils.PluginConfig, hDB *sql.DB) error { + backupList, err := fetchBackupNamesForDeletion(cutOffTimestamp, cutOffAfterTimestamp, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableReadHistoryDB(err)) + return err + } + if len(backupList) > 0 { + gplog.Debug("%s", textmsg.InfoTextBackupDeleteList(backupList)) + // Execute deletion for each backup. + // Use backupDeleteDBPlugin function from backup-delete command. + // Don't use force deletes and ignore errors for mass deletion. + err = backupDeleteDBPlugin(backupList, deleteCascade, false, false, pluginConfigPath, pluginConfig, hDB) + if err != nil { + return err + } + } else { + gplog.Info("%s", textmsg.InfoTextNothingToDo()) + } + return nil +} + +func backupCleanDBLocal(deleteCascade bool, cutOffTimestamp, cutOffAfterTimestamp, backupDir string, maxParallelProcesses int, hDB *sql.DB) error { + backupList, err := fetchBackupNamesForDeletion(cutOffTimestamp, cutOffAfterTimestamp, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableReadHistoryDB(err)) + return err + } + if len(backupList) > 0 { + gplog.Debug("%s", textmsg.InfoTextBackupDeleteList(backupList)) + err = backupDeleteDBLocal(backupList, backupDir, deleteCascade, false, false, maxParallelProcesses, hDB) + if err != nil { + return err + } + } else { + gplog.Info("%s", textmsg.InfoTextNothingToDo()) + } + return nil +} + +// Get the list of backup names for deletion. 
+func fetchBackupNamesForDeletion(cutOffTimestamp, cutOffAfterTimestamp string, hDB *sql.DB) ([]string, error) { + var backupList []string + var err error + if cutOffTimestamp != "" { + backupList, err = gpbckpconfig.GetBackupNamesBeforeTimestamp(cutOffTimestamp, hDB) + if err != nil { + return nil, err + } + } + if cutOffAfterTimestamp != "" { + backupList, err = gpbckpconfig.GetBackupNamesAfterTimestamp(cutOffAfterTimestamp, hDB) + if err != nil { + return nil, err + } + } + return backupList, nil +} diff --git a/gpbackman/cmd/backup_delete.go b/gpbackman/cmd/backup_delete.go new file mode 100644 index 00000000..34647d93 --- /dev/null +++ b/gpbackman/cmd/backup_delete.go @@ -0,0 +1,521 @@ +package cmd + +import ( + "bytes" + "database/sql" + "fmt" + "os" + "os/exec" + "strconv" + "sync" + + "github.com/apache/cloudberry-go-libs/gplog" + "github.com/apache/cloudberry-go-libs/operating" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/apache/cloudberry-backup/gpbackman/gpbckpconfig" + "github.com/apache/cloudberry-backup/gpbackman/textmsg" + "github.com/apache/cloudberry-backup/history" + "github.com/apache/cloudberry-backup/utils" +) + +// Flags for the gpbackman backup-delete command (backupDeleteCmd) +var ( + backupDeleteTimestamp []string + backupDeletePluginConfigFile string + backupDeleteBackupDir string + backupDeleteCascade bool + backupDeleteForce bool + backupDeleteIgnoreErrors bool + backupDeleteParallelProcesses int +) +var backupDeleteCmd = &cobra.Command{ + Use: "backup-delete", + Short: "Delete a specific existing backup", + Long: `Delete a specific existing backup. + +The --timestamp option must be specified. It could be specified multiple times. + +By default, the existence of dependent backups is checked and deletion process is not performed, +unless the --cascade option is passed in. + +If backup already deleted, the deletion process is skipped, unless --force option is specified. 
+If errors occur during the deletion process, the errors can be ignored using the --ignore-errors option. +The --ignore-errors option can be used only with --force option. + +By default, the deletion will be performed for local backup. + +The full path to the backup directory can be set using the --backup-dir option. + +For local backups the following logic are applied: + * If the --backup-dir option is specified, the deletion will be performed in provided path. + * If the --backup-dir option is not specified, but the backup was made with --backup-dir flag for gpbackup, the deletion will be performed in the backup manifest path. + * If the --backup-dir option is not specified and backup directory is not specified in backup manifest, the deletion will be performed in backup folder in the master and segments data directories. + * If backup is not local, the error will be returned. + +For control over the number of parallel processes and ssh connections to delete local backups, the --parallel-processes option can be used. + +The storage plugin config file location can be set using the --plugin-config option. +The full path to the file is required. In this case, the deletion will be performed using the storage plugin. + +For non local backups the following logic are applied: + * If the --plugin-config option is specified, the deletion will be performed using the storage plugin. + * If backup is local, the error will be returned. + +The gpbackup_history.db file location can be set using the --history-db option. +Can be specified only once. The full path to the file is required. 
+If the --history-db option is not specified, the history database will be searched in the current directory.`, + Args: cobra.NoArgs, + Run: func(cmd *cobra.Command, args []string) { + doRootFlagValidation(cmd.Flags(), checkFileExistsConst) + doDeleteBackupFlagValidation(cmd.Flags()) + doDeleteBackup() + }, +} + +var execCommand = exec.Command + +func init() { + rootCmd.AddCommand(backupDeleteCmd) + backupDeleteCmd.PersistentFlags().StringArrayVar( + &backupDeleteTimestamp, + timestampFlagName, + []string{""}, + "the backup timestamp for deleting, could be specified multiple times", + ) + backupDeleteCmd.PersistentFlags().StringVar( + &backupDeletePluginConfigFile, + pluginConfigFileFlagName, + "", + "the full path to plugin config file", + ) + backupDeleteCmd.PersistentFlags().BoolVar( + &backupDeleteCascade, + cascadeFlagName, + false, + "delete all dependent backups for the specified backup timestamp", + ) + backupDeleteCmd.PersistentFlags().BoolVar( + &backupDeleteForce, + forceFlagName, + false, + "try to delete, even if the backup already mark as deleted", + ) + backupDeleteCmd.PersistentFlags().StringVar( + &backupDeleteBackupDir, + backupDirFlagName, + "", + "the full path to backup directory for local backups", + ) + backupDeleteCmd.PersistentFlags().IntVar( + &backupDeleteParallelProcesses, + parallelProcessesFlagName, + 1, + "the number of parallel processes to delete local backups", + ) + backupDeleteCmd.PersistentFlags().BoolVar( + &backupDeleteIgnoreErrors, + ignoreErrorsFlagName, + false, + "ignore errors when deleting backups", + ) + _ = backupDeleteCmd.MarkPersistentFlagRequired(timestampFlagName) +} + +// These flag checks are applied only for backup-delete command. +func doDeleteBackupFlagValidation(flags *pflag.FlagSet) { + var err error + // If timestamps are specified and have correct values. 
+ if flags.Changed(timestampFlagName) { + for _, timestamp := range backupDeleteTimestamp { + err = gpbckpconfig.CheckTimestamp(timestamp) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(timestamp, timestampFlagName, err)) + execOSExit(exitErrorCode) + } + } + } + // backup-dir and plugin-config flags cannot be used together. + err = checkCompatibleFlags(flags, backupDirFlagName, pluginConfigFileFlagName) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableCompatibleFlags(err, backupDirFlagName, pluginConfigFileFlagName)) + execOSExit(exitErrorCode) + } + // If parallel-processes flag is specified and has correct values. + if flags.Changed(parallelProcessesFlagName) && !gpbckpconfig.IsPositiveValue(backupDeleteParallelProcesses) { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(strconv.Itoa(backupDeleteParallelProcesses), parallelProcessesFlagName, err)) + execOSExit(exitErrorCode) + } + // plugin-config and parallel-processes flags cannot be used together. + err = checkCompatibleFlags(flags, parallelProcessesFlagName, pluginConfigFileFlagName) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableCompatibleFlags(err, parallelProcessesFlagName, pluginConfigFileFlagName)) + execOSExit(exitErrorCode) + } + // If backup-dir flag is specified and it exists and the full path is specified. + if flags.Changed(backupDirFlagName) { + err = gpbckpconfig.CheckFullPath(backupDeleteBackupDir, checkFileExistsConst) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(backupDeleteBackupDir, backupDirFlagName, err)) + execOSExit(exitErrorCode) + } + } + // If the plugin-config flag is specified and it exists and the full path is specified. 
+ if flags.Changed(pluginConfigFileFlagName) { + err = gpbckpconfig.CheckFullPath(backupDeletePluginConfigFile, checkFileExistsConst) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(backupDeletePluginConfigFile, pluginConfigFileFlagName, err)) + execOSExit(exitErrorCode) + } + } + // If ignore-errors flag is specified, but force flag is not. + if flags.Changed(ignoreErrorsFlagName) && !flags.Changed(forceFlagName) { + gplog.Error("%s", textmsg.ErrorTextUnableValidateValue(textmsg.ErrorNotIndependentFlagsError(), ignoreErrorsFlagName, forceFlagName)) + execOSExit(exitErrorCode) + } + +} + +func doDeleteBackup() { + logHeadersDebug() + err := deleteBackup() + if err != nil { + execOSExit(exitErrorCode) + } +} + +func deleteBackup() error { + hDB, err := gpbckpconfig.OpenHistoryDB(getHistoryDBPath(rootHistoryDB)) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableActionHistoryDB("open", err)) + return err + } + defer func() { + closeErr := hDB.Close() + if closeErr != nil { + gplog.Error("%s", textmsg.ErrorTextUnableActionHistoryDB("close", closeErr)) + } + }() + if backupDeletePluginConfigFile != "" { + pluginConfig, err := utils.ReadPluginConfig(backupDeletePluginConfigFile) + if err != nil { + return err + } + err = backupDeleteDBPlugin(backupDeleteTimestamp, backupDeleteCascade, backupDeleteForce, backupDeleteIgnoreErrors, backupDeletePluginConfigFile, pluginConfig, hDB) + if err != nil { + return err + } + } else { + err := backupDeleteDBLocal(backupDeleteTimestamp, backupDeleteBackupDir, backupDeleteCascade, backupDeleteForce, backupDeleteIgnoreErrors, backupDeleteParallelProcesses, hDB) + if err != nil { + return err + } + } + return nil +} + +func backupDeleteDBPlugin(backupListForDeletion []string, deleteCascade, deleteForce, ignoreErrors bool, pluginConfigPath string, pluginConfig *utils.PluginConfig, hDB *sql.DB) error { + deleter := &backupPluginDeleter{ + pluginConfigPath: pluginConfigPath, + pluginConfig: pluginConfig} 
+ // Skip local backups. + skipLocalBackup := true + return backupDeleteDB(backupListForDeletion, deleteCascade, deleteForce, ignoreErrors, skipLocalBackup, deleter, hDB) +} + +func backupDeleteDBLocal(backupListForDeletion []string, backupDir string, deleteCascade, deleteForce, ignoreErrors bool, maxParallelProcesses int, hDB *sql.DB) error { + deleter := &backupLocalDeleter{ + backupDir: backupDir, + maxParallelProcesses: maxParallelProcesses} + // Include local backups. + skipLocalBackups := false + return backupDeleteDB(backupListForDeletion, deleteCascade, deleteForce, ignoreErrors, skipLocalBackups, deleter, hDB) +} + +func backupDeleteDB(backupListForDeletion []string, deleteCascade, deleteForce, ignoreErrors, skipLocalBackup bool, deleter backupDeleteInterface, hDB *sql.DB) error { + for _, backupName := range backupListForDeletion { + backupData, err := gpbckpconfig.GetBackupDataDB(backupName, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupInfo(backupName, err)) + return err + } + canBeDeleted, err := checkBackupCanBeUsed(deleteForce, skipLocalBackup, backupData) + if err != nil { + return err + } + if canBeDeleted { + backupDependencies, err := gpbckpconfig.GetBackupDependencies(backupName, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupValue("dependencies", backupName, err)) + return err + } + if len(backupDependencies) > 0 { + gplog.Info("%s", textmsg.InfoTextBackupDependenciesList(backupName, backupDependencies)) + if deleteCascade { + gplog.Debug("%s", textmsg.InfoTextBackupDeleteList(backupDependencies)) + // If the deletion of at least one dependent backup fails, we fail full entire chain. 
+ err = backupDeleteDBCascade(backupDependencies, deleteForce, ignoreErrors, skipLocalBackup, deleter, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableDeleteBackupCascade(backupName, err)) + return err + } + } else { + gplog.Error("%s", textmsg.ErrorTextUnableDeleteBackupUseCascade(backupName, textmsg.ErrorBackupDeleteCascadeOptionError())) + return textmsg.ErrorBackupDeleteCascadeOptionError() + } + } + err = deleter.backupDeleteDB(backupName, hDB, ignoreErrors) + if err != nil { + return err + } + } + } + return nil +} + +func backupDeleteDBCascade(backupList []string, deleteForce, ignoreErrors, skipLocalBackup bool, deleter backupDeleteInterface, hDB *sql.DB) error { + for _, backup := range backupList { + backupData, err := gpbckpconfig.GetBackupDataDB(backup, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupInfo(backup, err)) + return err + } + // Skip local backup. + canBeDeleted, err := checkBackupCanBeUsed(deleteForce, skipLocalBackup, backupData) + if err != nil { + return err + } + if canBeDeleted { + err = deleter.backupDeleteDB(backup, hDB, ignoreErrors) + if err != nil { + return err + } + } + } + return nil +} + +func backupDeleteDBPluginFunc(backupName, pluginConfigPath string, pluginConfig *utils.PluginConfig, hDB *sql.DB, ignoreErrors bool) error { + var err error + dateDeleted := history.CurrentTimestamp() + gplog.Info("%s", textmsg.InfoTextBackupDeleteStart(backupName)) + err = gpbckpconfig.UpdateDeleteStatus(backupName, gpbckpconfig.DateDeletedInProgress, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableSetBackupStatus(gpbckpconfig.DateDeletedInProgress, backupName, err)) + return err + } + gplog.Debug("%s", textmsg.InfoTextCommandExecution(pluginConfig.ExecutablePath, deleteBackupPluginCommand, pluginConfigPath, backupName)) + stdout, stderr, errdel := execDeleteBackupPlugin(pluginConfig.ExecutablePath, deleteBackupPluginCommand, pluginConfigPath, backupName) + if stderr != "" { + 
gplog.Error("%s", stderr) + } + if errdel != nil { + handleErrorDB(backupName, textmsg.ErrorTextUnableDeleteBackup(backupName, errdel), gpbckpconfig.DateDeletedPluginFailed, hDB) + if !ignoreErrors { + return errdel + } + } + gplog.Info("%s", stdout) + backupData, err := gpbckpconfig.GetBackupDataDB(backupName, hDB) + if err != nil { + handleErrorDB(backupName, textmsg.ErrorTextUnableGetBackupInfo(backupName, err), gpbckpconfig.DateDeletedPluginFailed, hDB) + if !ignoreErrors { + return err + } + } + bckpDir, _, _, err := getBackupMasterDir("", backupData.BackupDir, backupData.DatabaseName) + if err != nil { + handleErrorDB(backupName, textmsg.ErrorTextUnableGetBackupPath("backup directory", backupName, err), gpbckpconfig.DateDeletedPluginFailed, hDB) + if !ignoreErrors { + return err + } + } + gplog.Debug("%s", textmsg.InfoTextCommandExecution("delete directory", gpbckpconfig.BackupDirPath(bckpDir, backupName))) + // Delete local files on master. + err = os.RemoveAll(gpbckpconfig.BackupDirPath(bckpDir, backupName)) + if err != nil { + handleErrorDB(backupName, textmsg.ErrorTextUnableDeleteBackup(backupName, err), gpbckpconfig.DateDeletedPluginFailed, hDB) + if !ignoreErrors { + return err + } + } + err = gpbckpconfig.UpdateDeleteStatus(backupName, dateDeleted, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableSetBackupStatus(dateDeleted, backupName, err)) + return err + } + gplog.Info("%s", textmsg.InfoTextBackupDeleteSuccess(backupName)) + return nil +} + +func backupDeleteDBLocalFunc(backupName, backupDir string, maxParallelProcesses int, hDB *sql.DB, ignoreErrors bool) error { + var err, errUpdate error + dateDeleted := history.CurrentTimestamp() + gplog.Info("%s", textmsg.InfoTextBackupDeleteStart(backupName)) + errUpdate = gpbckpconfig.UpdateDeleteStatus(backupName, gpbckpconfig.DateDeletedInProgress, hDB) + if errUpdate != nil { + gplog.Error("%s", textmsg.ErrorTextUnableSetBackupStatus(gpbckpconfig.DateDeletedInProgress, backupName, 
errUpdate)) + return errUpdate + } + backupData, err := gpbckpconfig.GetBackupDataDB(backupName, hDB) + if err != nil { + handleErrorDB(backupName, textmsg.ErrorTextUnableGetBackupInfo(backupName, err), gpbckpconfig.DateDeletedLocalFailed, hDB) + return err + } + bckpDir, segPrefix, isSingleBackupDir, err := getBackupMasterDir(backupDir, backupData.BackupDir, backupData.DatabaseName) + if err != nil { + handleErrorDB(backupName, textmsg.ErrorTextUnableGetBackupPath("backup directory", backupName, err), gpbckpconfig.DateDeletedLocalFailed, hDB) + return err + } + gplog.Debug("%s", textmsg.InfoTextBackupDirPath(bckpDir)) + gplog.Debug("%s", textmsg.InfoTextSegmentPrefix(segPrefix)) + backupType, err := gpbckpconfig.GetBackupType(backupData) + if err != nil { + handleErrorDB(backupName, textmsg.ErrorTextUnableGetBackupValue("type", backupName, err), gpbckpconfig.DateDeletedLocalFailed, hDB) + return err + } + // If backup type is not "metadata-only", we should delete files on segments and master. + // If backup type is "metadata-only", we should delete files only on master. + if backupType != gpbckpconfig.BackupTypeMetadataOnly { + var errSeg error + segConfig, errSeg := getSegmentConfigurationClusterInfo(backupData.DatabaseName) + if errSeg != nil { + handleErrorDB(backupName, textmsg.ErrorTextUnableGetBackupPath("segment configuration", backupName, errSeg), gpbckpconfig.DateDeletedLocalFailed, hDB) + if !ignoreErrors { + return errSeg + } + } + // Execute on segments. + errSeg = executeDeleteBackupOnSegments(backupDir, backupData.BackupDir, backupName, segPrefix, isSingleBackupDir, ignoreErrors, segConfig, maxParallelProcesses) + if errSeg != nil { + handleErrorDB(backupName, textmsg.ErrorTextUnableDeleteBackup(backupName, errSeg), gpbckpconfig.DateDeletedLocalFailed, hDB) + if !ignoreErrors { + return errSeg + } + } + } + // Delete files on master. 
+ gplog.Debug("%s", textmsg.InfoTextCommandExecution("delete directory", gpbckpconfig.BackupDirPath(bckpDir, backupName))) + err = os.RemoveAll(gpbckpconfig.BackupDirPath(bckpDir, backupName)) + if err != nil { + handleErrorDB(backupName, textmsg.ErrorTextUnableDeleteBackup(backupName, err), gpbckpconfig.DateDeletedLocalFailed, hDB) + if !ignoreErrors { + return err + } + } + errUpdate = gpbckpconfig.UpdateDeleteStatus(backupName, dateDeleted, hDB) + if errUpdate != nil { + gplog.Error("%s", textmsg.ErrorTextUnableSetBackupStatus(dateDeleted, backupName, errUpdate)) + return errUpdate + } + gplog.Info("%s", textmsg.InfoTextBackupDeleteSuccess(backupName)) + return nil +} + +func execDeleteBackupPlugin(executablePath, deleteBackupPluginCommand, pluginConfigFile, timestamp string) (string, string, error) { + cmd := execCommand(executablePath, deleteBackupPluginCommand, pluginConfigFile, timestamp) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + return stdout.String(), stderr.String(), err +} + +// ExecuteCommandsOnHosts Delete backup dir on all segment hosts in parallel. +// The function checks that the directories exists on all segment hosts before deletion. +func executeDeleteBackupOnSegments(backupDir, backupDataBackupDir, backupName, segPrefix string, isSingleBackupDir, ignoreErrors bool, configs []gpbckpconfig.SegmentConfig, maxParallelProcesses int) error { + var once sync.Once + limit := make(chan bool, maxParallelProcesses) + wg := &sync.WaitGroup{} + errCh := make(chan error, len(configs)) + currentUser, _ := operating.System.CurrentUser() + userName := currentUser.Username + // Check that the directory exists on all segment hosts. 
+ for _, config := range configs { + wg.Add(1) + limit <- true + backupPath, err := getBackupSegmentDir(backupDir, backupDataBackupDir, config.DataDir, segPrefix, config.ContentID, isSingleBackupDir) + if err != nil { + return err + } + go func(backupPath, host string) { + defer func() { <-limit }() + defer wg.Done() + checkBackupDirExistsOnSegments(gpbckpconfig.BackupDirPath(backupPath, backupName), host, userName, errCh) + }(backupPath, config.Hostname) + } + // We should block the main function and wait for the WaitGroup to complete. + // It is necessary to strictly verify that all checks are performed for a specific backup + // and this particular backup will be deleted. + // Only after deleting backups can we move on to the next one. + // It is necessary to avoid situations where checks are performed simultaneously for one backup, + // and deletion occurs for another. + // + // Don't use code like + // go func() { wg.Wait(); once.Do(func() { close(errCh) }) }() + // + wg.Wait() + // Fix error like "panic: close of closed channel". + once.Do(func() { + close(errCh) + }) + for err := range errCh { + if err != nil && !ignoreErrors { + return err + } + } + // If all checks passed, delete the directory on all segment hosts. + // Reset the wait group. + wg = &sync.WaitGroup{} + for _, config := range configs { + wg.Add(1) + limit <- true + backupPath, err := getBackupSegmentDir(backupDir, backupDataBackupDir, config.DataDir, segPrefix, config.ContentID, isSingleBackupDir) + if err != nil { + return err + } + go func(backupPath, host string) { + defer func() { <-limit }() + defer wg.Done() + deleteBackupDirOnSegments(gpbckpconfig.BackupDirPath(backupPath, backupName), host, userName, errCh) + }(backupPath, config.Hostname) + } + wg.Wait() + // Fix error like "panic: close of closed channel". 
+ once.Do(func() { + close(errCh) + }) + for err := range errCh { + if err != nil && !ignoreErrors { + return err + } + } + return nil +} +func runSSHCommand(remoteCmd, host, userName string) ([]byte, error) { + cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking=no", fmt.Sprintf("%s@%s", userName, host), remoteCmd) + return cmd.CombinedOutput() +} + +func checkBackupDirExistsOnSegments(path, host, userName string, errCh chan error) { + command := fmt.Sprintf("test -d %s", path) + gplog.Debug("%s", textmsg.InfoTextCommandExecution(command, "on host", host)) + if _, err := runSSHCommand(command, host, userName); err != nil { + gplog.Error("%s", textmsg.ErrorTextCommandExecutionFailed(err, command, "on host", host)) + errCh <- textmsg.ErrorNotFoundBackupDirIn(fmt.Sprintf("%s on host %s", path, host)) + return + } + gplog.Debug("%s", textmsg.InfoTextCommandExecutionSucceeded(command, "on host", host)) +} + +func deleteBackupDirOnSegments(path, host, userName string, errCh chan error) { + command := fmt.Sprintf("rm -rf %s", path) + gplog.Debug("%s", textmsg.InfoTextCommandExecution(command, "on host", host)) + if _, err := runSSHCommand(command, host, userName); err != nil { + gplog.Error("%s", textmsg.ErrorTextCommandExecutionFailed(err, command, "on host", host)) + errCh <- err + return + } + gplog.Debug("%s", textmsg.InfoTextCommandExecutionSucceeded(command, "on host", host)) +} diff --git a/gpbackman/cmd/backup_info.go b/gpbackman/cmd/backup_info.go new file mode 100644 index 00000000..18671a3b --- /dev/null +++ b/gpbackman/cmd/backup_info.go @@ -0,0 +1,335 @@ +package cmd + +import ( + "database/sql" + "os" + + "github.com/apache/cloudberry-go-libs/gplog" + "github.com/olekukonko/tablewriter" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/apache/cloudberry-backup/gpbackman/gpbckpconfig" + "github.com/apache/cloudberry-backup/gpbackman/textmsg" + "github.com/apache/cloudberry-backup/history" +) + +// Flags for the gpbackman backup-info 
command (backupInfoCmd) +var ( + backupInfoShowDeleted bool + backupInfoShowFailed bool + backupInfoBackupTypeFilter string + backupInfoTableNameFilter string + backupInfoSchemaNameFilter string + backupInfoExcludeFilter bool + backupInfoTimestamp string + backupInfoShowDetails bool +) + +// Options for the backup-info command. +type BackupInfoOptions struct { + ShowDeleted bool + ShowFailed bool + BackupTypeFilter string + TableNameFilter string + SchemaNameFilter string + ExcludeFilter bool + Timestamp string + ShowDetails bool +} + +var backupInfoCmd = &cobra.Command{ + Use: "backup-info", + Short: "Display information about backups", + Long: `Display information about backups. + +By default, only active backups or backups with deletion status "In progress" from gpbackup_history.db are displayed. + +To display deleted backups, use the --deleted option. +To display failed backups, use the --failed option. +To display all backups, use --deleted and --failed options together. + +To display backups of a specific type, use the --type option. + +To display backups that include the specified table, use the --table option. +The formatting rules for .
match those of the --include-table option in gpbackup. + +To display backups that include the specified schema, use the --schema option. +The formatting rules for match those of the --include-schema option in gpbackup. + +To display backups that exclude the specified table, use the --table and --exclude options. +The formatting rules for .
match those of the --exclude-table option in gpbackup. + +To display backups that exclude the specified schema, use the --schema and --exclude options. +The formatting rules for match those of the --exclude-schema option in gpbackup. + +To display details about object filtering, use the --detail option. +The details are presented as follows, depending on the active filtering type: + * include-table / exclude-table: a comma-separated list of fully-qualified table names in the format .
; + * include-schema / exclude-schema: a comma-separated list of schema names; + * if no object filtering was used, the value is empty. + +To display a backup chain for a specific backup, use the --timestamp option. +In this mode, the backup with the specified timestamp and all of its dependent backups will be displayed. +The deleted and failed backups are always included in this mode. +To display object filtering details in this mode, use the --detail option. +When --timestamp is set, the following options cannot be used: --type, --table, --schema, --exclude, --failed, --deleted. + +To display the "object filtering details" column for all backups without using --timestamp, use the --detail option. + +The gpbackup_history.db file location can be set using the --history-db option. +Can be specified only once. The full path to the file is required. +If the --history-db option is not specified, the history database will be searched in the current directory.`, + Args: cobra.NoArgs, + Run: func(cmd *cobra.Command, args []string) { + doRootFlagValidation(cmd.Flags(), checkFileExistsConst) + doBackupInfoFlagValidation(cmd.Flags()) + doBackupInfo() + }, +} + +func init() { + rootCmd.AddCommand(backupInfoCmd) + backupInfoCmd.Flags().StringVar( + &backupInfoTimestamp, + timestampFlagName, + "", + "show backup info and its dependent backups for the specified timestamp", + ) + backupInfoCmd.Flags().BoolVar( + &backupInfoShowDeleted, + deletedFlagName, + false, + "show deleted backups", + ) + backupInfoCmd.Flags().BoolVar( + &backupInfoShowFailed, + failedFlagName, + false, + "show failed backups", + ) + backupInfoCmd.Flags().StringVar( + &backupInfoBackupTypeFilter, + typeFlagName, + "", + "backup type filter (full, incremental, data-only, metadata-only)", + ) + backupInfoCmd.Flags().StringVar( + &backupInfoTableNameFilter, + tableFlagName, + "", + "show backups that include the specified table (format .
)", + ) + backupInfoCmd.Flags().StringVar( + &backupInfoSchemaNameFilter, + schemaFlagName, + "", + "show backups that include the specified schema", + ) + backupInfoCmd.Flags().BoolVar( + &backupInfoExcludeFilter, + excludeFlagName, + false, + "show backups that exclude the specific table (format .
) or schema", + ) + backupInfoCmd.Flags().BoolVar( + &backupInfoShowDetails, + detailFlagName, + false, + "show object filtering details", + ) +} + +// These flag checks are applied only for backup-info commands. +func doBackupInfoFlagValidation(flags *pflag.FlagSet) { + var err error + if flags.Changed(timestampFlagName) { + err = gpbckpconfig.CheckTimestamp(backupInfoTimestamp) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(backupInfoTimestamp, timestampFlagName, err)) + execOSExit(exitErrorCode) + } + // --timestamp is not compatible with --type, --table, --schema, --exclude, --failed, --deleted + err = checkCompatibleFlags(flags, timestampFlagName, + typeFlagName, tableFlagName, schemaFlagName, excludeFlagName, failedFlagName, deletedFlagName) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableCompatibleFlags(err, timestampFlagName, typeFlagName, tableFlagName, schemaFlagName, excludeFlagName, failedFlagName, deletedFlagName)) + execOSExit(exitErrorCode) + } + } + // If type is specified and have correct values. + if flags.Changed(typeFlagName) { + err = checkBackupType(backupInfoBackupTypeFilter) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(backupInfoBackupTypeFilter, typeFlagName, err)) + execOSExit(exitErrorCode) + } + } + // table flag and schema flags cannot be used together. + err = checkCompatibleFlags(flags, tableFlagName, schemaFlagName) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableCompatibleFlags(err, tableFlagName, schemaFlagName)) + execOSExit(exitErrorCode) + } + // If table is specified and have correct values. + if flags.Changed(tableFlagName) { + err = gpbckpconfig.CheckTableFQN(backupInfoTableNameFilter) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(backupInfoTableNameFilter, tableFlagName, err)) + execOSExit(exitErrorCode) + } + } + // If exclude flag is specified, but table or schema flag is not. 
+ if flags.Changed(excludeFlagName) && !flags.Changed(tableFlagName) && !flags.Changed(schemaFlagName) { + gplog.Error("%s", textmsg.ErrorTextUnableValidateValue(textmsg.ErrorNotIndependentFlagsError(), tableFlagName, schemaFlagName)) + execOSExit(exitErrorCode) + } +} + +func doBackupInfo() { + logHeadersDebug() + err := backupInfo() + if err != nil { + execOSExit(exitErrorCode) + } +} + +func backupInfo() error { + opts := BackupInfoOptions{ + ShowDeleted: backupInfoShowDeleted, + ShowFailed: backupInfoShowFailed, + BackupTypeFilter: backupInfoBackupTypeFilter, + TableNameFilter: backupInfoTableNameFilter, + SchemaNameFilter: backupInfoSchemaNameFilter, + ExcludeFilter: backupInfoExcludeFilter, + Timestamp: backupInfoTimestamp, + ShowDetails: backupInfoShowDetails, + } + t := tablewriter.NewWriter(os.Stdout) + initTable(t, opts.ShowDetails) + hDB, err := gpbckpconfig.OpenHistoryDB(getHistoryDBPath(rootHistoryDB)) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableActionHistoryDB("open", err)) + return err + } + defer func() { + closeErr := hDB.Close() + if closeErr != nil { + gplog.Error("%s", textmsg.ErrorTextUnableActionHistoryDB("close", closeErr)) + } + }() + err = backupInfoDB(opts, hDB, t) + if err != nil { + return err + } + t.Render() + return nil +} + +func backupInfoDB(opts BackupInfoOptions, hDB *sql.DB, t *tablewriter.Table) error { + // List all according to showDeleted/showFailed + if opts.Timestamp == "" { + backupList, err := gpbckpconfig.GetBackupNamesDB(opts.ShowDeleted, opts.ShowFailed, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableReadHistoryDB(err)) + return err + } + for _, backupName := range backupList { + backupData, err := gpbckpconfig.GetBackupDataDB(backupName, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupInfo(backupName, err)) + return err + } + addBackupToTable(opts.BackupTypeFilter, opts.TableNameFilter, opts.SchemaNameFilter, opts.ExcludeFilter, opts.ShowDetails, 
backupData, t) + } + return nil + } + // Timestamp mode: show base backup and only its dependent backups + // Verify base backup exists + baseBackupData, err := gpbckpconfig.GetBackupDataDB(opts.Timestamp, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupInfo(opts.Timestamp, err)) + return err + } + addBackupToTable("", "", "", false, opts.ShowDetails, baseBackupData, t) + backupDependenciesList, err := gpbckpconfig.GetBackupDependencies(opts.Timestamp, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableReadHistoryDB(err)) + return err + } + for _, depTimestamp := range backupDependenciesList { + backupData, err := gpbckpconfig.GetBackupDataDB(depTimestamp, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupInfo(depTimestamp, err)) + return err + } + addBackupToTable("", "", "", false, opts.ShowDetails, backupData, t) + } + return nil +} + +func initTable(t *tablewriter.Table, includeDetails bool) { + t.SetBorder(false) + header := []string{ + "timestamp", + "date", + "status", + "database", + "type", + "object filtering", + "plugin", + "duration", + "date deleted", + } + if includeDetails { + header = append(header, "object filtering details") + } + t.SetHeader(header) +} + +// addBackupToTable adds a backup to the table for displaying. +// +// If errors occur, they are logged, but they are not returned. +// The main idea is to show the maximum available information and display all errors that occur. +// But do not fall when errors occur. So, display anyway. 
+func addBackupToTable(backupTypeFilter, backupTableFilter, backupSchemaFilter string, backupExcludeFilter, includeDetails bool, backupData *history.BackupConfig, t *tablewriter.Table) { + var matchToObjectFilter bool + backupDate, err := gpbckpconfig.GetBackupDate(backupData) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupValue("date", backupData.Timestamp, err)) + } + backupType, err := gpbckpconfig.GetBackupType(backupData) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupValue("type", backupData.Timestamp, err)) + } + backupFilter, err := gpbckpconfig.GetObjectFilteringInfo(backupData) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupValue("object filtering", backupData.Timestamp, err)) + } + backupDuration, err := gpbckpconfig.GetBackupDuration(backupData) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupValue("duration", backupData.Timestamp, err)) + } + backupDateDeleted, err := gpbckpconfig.GetBackupDateDeleted(backupData) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupValue("date deletion", backupData.Timestamp, err)) + } + matchToObjectFilter = gpbckpconfig.CheckObjectFilteringExists(backupData, backupTableFilter, backupSchemaFilter, backupFilter, backupExcludeFilter) + if (backupTypeFilter == "" || backupTypeFilter == backupType) && matchToObjectFilter { + row := []string{ + backupData.Timestamp, + backupDate, + backupData.Status, + backupData.DatabaseName, + backupType, + backupFilter, + backupData.Plugin, + formatBackupDuration(backupDuration), + backupDateDeleted, + } + if includeDetails { + row = append(row, gpbckpconfig.GetObjectFilteringDetails(backupData)) + } + t.Append(row) + } +} diff --git a/gpbackman/cmd/cmd_suite_test.go b/gpbackman/cmd/cmd_suite_test.go new file mode 100644 index 00000000..d84a7680 --- /dev/null +++ b/gpbackman/cmd/cmd_suite_test.go @@ -0,0 +1,18 @@ +package cmd + +import ( + "testing" + + 
"github.com/apache/cloudberry-go-libs/testhelper" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestCmd(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Cmd Suite") +} + +var _ = BeforeSuite(func() { + _, _, _ = testhelper.SetupTestLogger() +}) diff --git a/gpbackman/cmd/constants.go b/gpbackman/cmd/constants.go new file mode 100644 index 00000000..0dce0aa3 --- /dev/null +++ b/gpbackman/cmd/constants.go @@ -0,0 +1,56 @@ +package cmd + +const ( + commandName = "gpbackman" + + // Plugin commands. + // To be able to work with various plugins, + // it is highly desirable to use the commands from the plugin specification. + // See https://github.com/greenplum-db/gpbackup/blob/710fe53305958c1faed2e6008b894b4923bed253/plugins/README.md + deleteBackupPluginCommand = "delete_backup" + restoreDataPluginCommand = "restore_data" + + historyFileNameBaseConst = "gpbackup_history" + historyFileDBSuffixConst = ".db" + historyDBNameConst = historyFileNameBaseConst + historyFileDBSuffixConst + + // Flags. + historyDBFlagName = "history-db" + logFileFlagName = "log-file" + logLevelConsoleFlagName = "log-level-console" + logLevelFileFlagName = "log-level-file" + timestampFlagName = "timestamp" + pluginConfigFileFlagName = "plugin-config" + reportFilePluginPathFlagName = "plugin-report-file-path" + deletedFlagName = "deleted" + failedFlagName = "failed" + cascadeFlagName = "cascade" + forceFlagName = "force" + olderThanDaysFlagName = "older-than-days" + beforeTimestampFlagName = "before-timestamp" + afterTimestampFlagName = "after-timestamp" + typeFlagName = "type" + tableFlagName = "table" + schemaFlagName = "schema" + excludeFlagName = "exclude" + backupDirFlagName = "backup-dir" + parallelProcessesFlagName = "parallel-processes" + ignoreErrorsFlagName = "ignore-errors" + detailFlagName = "detail" + + exitErrorCode = 1 + + // Default for checking the existence of the file. + checkFileExistsConst = true + + // Batch size for deleting from sqlite3. 
+ // This is to prevent problem with sqlite3. + sqliteDeleteBatchSize = 1000 +) + +var ( + // Timestamp to delete all backups before. + beforeTimestamp string + // Timestamp to delete all backups after. + afterTimestamp string +) diff --git a/gpbackman/cmd/history_clean.go b/gpbackman/cmd/history_clean.go new file mode 100644 index 00000000..473bf334 --- /dev/null +++ b/gpbackman/cmd/history_clean.go @@ -0,0 +1,124 @@ +package cmd + +import ( + "database/sql" + + "github.com/apache/cloudberry-go-libs/gplog" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/apache/cloudberry-backup/gpbackman/gpbckpconfig" + "github.com/apache/cloudberry-backup/gpbackman/textmsg" +) + +// Flags for the gpbackman history-clean command (historyCleanCmd) +var ( + historyCleanBeforeTimestamp string + historyCleanOlderThanDays uint +) + +var historyCleanCmd = &cobra.Command{ + Use: "history-clean", + Short: "Clean deleted backups from the history database", + Long: `Clean deleted backups from the history database. +Only the database is being cleaned up. + +Information is deleted only about deleted backups from gpbackup_history.db. Each backup must be deleted first. + +To delete information about backups older than the given timestamp, use the --before-timestamp option. +To delete information about backups older than the given number of days, use the --older-than-day option. +Only --older-than-days or --before-timestamp option must be specified, not both. + +The gpbackup_history.db file location can be set using the --history-db option. +Can be specified only once. The full path to the file is required. 
+If the --history-db option is not specified, the history database will be searched in the current directory.`, + Args: cobra.NoArgs, + Run: func(cmd *cobra.Command, args []string) { + doRootFlagValidation(cmd.Flags(), checkFileExistsConst) + doCleanHistoryFlagValidation(cmd.Flags()) + doCleanHistory() + }, +} + +func init() { + rootCmd.AddCommand(historyCleanCmd) + historyCleanCmd.PersistentFlags().UintVar( + &historyCleanOlderThanDays, + olderThanDaysFlagName, + 0, + "delete information about backups older than the given number of days", + ) + historyCleanCmd.PersistentFlags().StringVar( + &historyCleanBeforeTimestamp, + beforeTimestampFlagName, + "", + "delete information about backups older than the given timestamp", + ) + historyCleanCmd.MarkFlagsMutuallyExclusive(beforeTimestampFlagName, olderThanDaysFlagName) +} + +// These flag checks are applied only for history-clean command. +func doCleanHistoryFlagValidation(flags *pflag.FlagSet) { + var err error + // If before-timestamp are specified and have correct values. 
+ if flags.Changed(beforeTimestampFlagName) { + err = gpbckpconfig.CheckTimestamp(historyCleanBeforeTimestamp) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(historyCleanBeforeTimestamp, beforeTimestampFlagName, err)) + execOSExit(exitErrorCode) + } + beforeTimestamp = historyCleanBeforeTimestamp + } + if flags.Changed(olderThanDaysFlagName) { + beforeTimestamp = gpbckpconfig.GetTimestampOlderThan(historyCleanOlderThanDays) + } + if beforeTimestamp == "" { + gplog.Error("%s", textmsg.ErrorTextUnableValidateValue(textmsg.ErrorValidationValue(), olderThanDaysFlagName, beforeTimestampFlagName)) + execOSExit(exitErrorCode) + } +} + +func doCleanHistory() { + logHeadersDebug() + err := cleanHistory() + if err != nil { + execOSExit(exitErrorCode) + } +} + +func cleanHistory() error { + hDB, err := gpbckpconfig.OpenHistoryDB(getHistoryDBPath(rootHistoryDB)) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableActionHistoryDB("open", err)) + return err + } + defer func() { + closeErr := hDB.Close() + if closeErr != nil { + gplog.Error("%s", textmsg.ErrorTextUnableActionHistoryDB("close", closeErr)) + } + }() + err = historyCleanDB(beforeTimestamp, hDB) + if err != nil { + return err + } + return nil +} + +func historyCleanDB(cutOffTimestamp string, hDB *sql.DB) error { + backupList, err := gpbckpconfig.GetBackupNamesForCleanBeforeTimestamp(cutOffTimestamp, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableReadHistoryDB(err)) + return err + } + if len(backupList) > 0 { + gplog.Debug("%s", textmsg.InfoTextBackupDeleteListFromHistory(backupList)) + err := gpbckpconfig.CleanBackupsDB(backupList, sqliteDeleteBatchSize, hDB) + if err != nil { + return err + } + } else { + gplog.Info("%s", textmsg.InfoTextNothingToDo()) + } + return nil +} diff --git a/gpbackman/cmd/interfaces.go b/gpbackman/cmd/interfaces.go new file mode 100644 index 00000000..91608514 --- /dev/null +++ b/gpbackman/cmd/interfaces.go @@ -0,0 +1,29 @@ +package 
cmd + +import ( + "database/sql" + + "github.com/apache/cloudberry-backup/utils" +) + +type backupDeleteInterface interface { + backupDeleteDB(backupName string, hDB *sql.DB, ignoreErrors bool) error +} + +type backupPluginDeleter struct { + pluginConfigPath string + pluginConfig *utils.PluginConfig +} + +func (bpd *backupPluginDeleter) backupDeleteDB(backupName string, hDB *sql.DB, ignoreErrors bool) error { + return backupDeleteDBPluginFunc(backupName, bpd.pluginConfigPath, bpd.pluginConfig, hDB, ignoreErrors) +} + +type backupLocalDeleter struct { + backupDir string + maxParallelProcesses int +} + +func (bld *backupLocalDeleter) backupDeleteDB(backupName string, hDB *sql.DB, ignoreErrors bool) error { + return backupDeleteDBLocalFunc(backupName, bld.backupDir, bld.maxParallelProcesses, hDB, ignoreErrors) +} diff --git a/gpbackman/cmd/report_info.go b/gpbackman/cmd/report_info.go new file mode 100644 index 00000000..b8a2cd21 --- /dev/null +++ b/gpbackman/cmd/report_info.go @@ -0,0 +1,277 @@ +package cmd + +import ( + "bytes" + "database/sql" + "fmt" + "os" + "path/filepath" + + "github.com/apache/cloudberry-go-libs/gplog" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/apache/cloudberry-backup/gpbackman/gpbckpconfig" + "github.com/apache/cloudberry-backup/gpbackman/textmsg" + "github.com/apache/cloudberry-backup/history" + "github.com/apache/cloudberry-backup/utils" +) + +// Flags for the gpbackman report-info command (reportInfoCmd) +var ( + reportInfoTimestamp string + reportInfoPluginConfigFile string + reportInfoReportFilePluginPath string + reportInfoBackupDir string +) + +var reportInfoCmd = &cobra.Command{ + Use: "report-info", + Short: "Display the report for a specific backup", + Long: `Display the report for a specific backup. + +The --timestamp option must be specified. + +The report could be displayed only for active backups. + +The full path to the backup directory can be set using the --backup-dir option. 
+The full path to the data directory is required. + +For local backups the following logic are applied: + * If the --backup-dir option is specified, the report will be searched in provided path. + * If the --backup-dir option is not specified, but the backup was made with --backup-dir flag for gpbackup, the report will be searched in provided path from backup manifest. + * If the --backup-dir option is not specified and backup directory is not specified in backup manifest, the utility try to connect to local cluster and get master data directory. + If this information is available, the report will be in master data directory. + * If backup is not local, the error will be returned. + +The storage plugin config file location can be set using the --plugin-config option. +The full path to the file is required. + +For non local backups the following logic are applied: + * If the --plugin-config option is specified, the report will be searched in provided location. + * If backup is local, the error will be returned. + +Only --backup-dir or --plugin-config option can be specified, not both. + +If a custom plugin is used, it is required to specify the path to the directory with the repo file using the --plugin-report-file-path option. +It is not necessary to use the --plugin-report-file-path flag for the following plugins (the path is generated automatically): + * gpbackup_s3_plugin. + +The gpbackup_history.db file location can be set using the --history-db option. +Can be specified only once. The full path to the file is required. 
+If the --history-db option is not specified, the history database will be searched in the current directory.`, + Args: cobra.NoArgs, + Run: func(cmd *cobra.Command, args []string) { + doRootFlagValidation(cmd.Flags(), checkFileExistsConst) + doReportInfoFlagValidation(cmd.Flags()) + doReportInfo() + }, +} + +func init() { + rootCmd.AddCommand(reportInfoCmd) + reportInfoCmd.PersistentFlags().StringVar( + &reportInfoTimestamp, + timestampFlagName, + "", + "the backup timestamp for report displaying", + ) + reportInfoCmd.PersistentFlags().StringVar( + &reportInfoPluginConfigFile, + pluginConfigFileFlagName, + "", + "the full path to plugin config file", + ) + reportInfoCmd.PersistentFlags().StringVar( + &reportInfoReportFilePluginPath, + reportFilePluginPathFlagName, + "", + "the full path to plugin report file", + ) + reportInfoCmd.PersistentFlags().StringVar( + &reportInfoBackupDir, + backupDirFlagName, + "", + "the full path to backup directory", + ) + _ = reportInfoCmd.MarkPersistentFlagRequired(timestampFlagName) +} + +// These flag checks are applied only for report-info command. +func doReportInfoFlagValidation(flags *pflag.FlagSet) { + var err error + // If timestamps are specified and have correct values. + if flags.Changed(timestampFlagName) { + err = gpbckpconfig.CheckTimestamp(reportInfoTimestamp) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(reportInfoTimestamp, timestampFlagName, err)) + execOSExit(exitErrorCode) + } + } + // backup-dir anf plugin-config flags cannot be used together. + err = checkCompatibleFlags(flags, backupDirFlagName, pluginConfigFileFlagName) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableCompatibleFlags(err, backupDirFlagName, pluginConfigFileFlagName)) + execOSExit(exitErrorCode) + } + // If backup-dir flag is specified and it exists and the full path is specified. 
+ if flags.Changed(backupDirFlagName) { + err = gpbckpconfig.CheckFullPath(reportInfoBackupDir, checkFileExistsConst) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(reportInfoBackupDir, backupDirFlagName, err)) + execOSExit(exitErrorCode) + } + } + // If plugin-config flag is specified and it exists and the full path is specified. + if flags.Changed(pluginConfigFileFlagName) { + err = gpbckpconfig.CheckFullPath(reportInfoPluginConfigFile, checkFileExistsConst) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(reportInfoPluginConfigFile, pluginConfigFileFlagName, err)) + execOSExit(exitErrorCode) + } + } + // If plugin-report-file-path flag is specified. + if flags.Changed(reportFilePluginPathFlagName) { + // But plugin-config flag is not specified. + if !flags.Changed(pluginConfigFileFlagName) { + gplog.Error("%s", textmsg.ErrorTextUnableValidateValue(textmsg.ErrorNotIndependentFlagsError(), reportFilePluginPathFlagName, pluginConfigFileFlagName)) + execOSExit(exitErrorCode) + } + // Check full path. 
+ err = gpbckpconfig.CheckFullPath(reportInfoReportFilePluginPath, false) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(reportInfoReportFilePluginPath, reportFilePluginPathFlagName, err)) + execOSExit(exitErrorCode) + } + } +} + +func doReportInfo() { + logHeadersDebug() + err := reportInfo() + if err != nil { + execOSExit(exitErrorCode) + } +} + +func reportInfo() error { + hDB, err := gpbckpconfig.OpenHistoryDB(getHistoryDBPath(rootHistoryDB)) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableActionHistoryDB("open", err)) + return err + } + defer func() { + closeErr := hDB.Close() + if closeErr != nil { + gplog.Error("%s", textmsg.ErrorTextUnableActionHistoryDB("close", closeErr)) + } + }() + if reportInfoPluginConfigFile != "" { + pluginConfig, err := utils.ReadPluginConfig(reportInfoPluginConfigFile) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableReadPluginConfigFile(err)) + return err + } + err = reportInfoDBPlugin(reportInfoTimestamp, reportInfoPluginConfigFile, pluginConfig, hDB) + if err != nil { + return err + } + } else { + err := reportInfoDBLocal(reportInfoTimestamp, reportInfoBackupDir, hDB) + if err != nil { + return err + } + } + return nil +} + +func reportInfoDBPlugin(backupName, pluginConfigPath string, pluginConfig *utils.PluginConfig, hDB *sql.DB) error { + backupData, err := gpbckpconfig.GetBackupDataDB(backupName, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupInfo(backupName, err)) + return err + } + err = reportInfoPluginFunc(backupData, pluginConfigPath, pluginConfig) + if err != nil { + return err + } + return nil +} + +func reportInfoPluginFunc(backupData *history.BackupConfig, pluginConfigPath string, pluginConfig *utils.PluginConfig) error { + // Skip local backup. 
+ canGetReport, err := checkBackupCanBeUsed(false, true, backupData) + if err != nil { + return err + } + if canGetReport { + reportFile, err := gpbckpconfig.GetReportFilePathPlugin(backupData, reportInfoReportFilePluginPath, pluginConfig.Options) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupPath("report", backupData.Timestamp, err)) + return err + } + gplog.Debug("%s", textmsg.InfoTextCommandExecution(pluginConfig.ExecutablePath, restoreDataPluginCommand, pluginConfigPath, reportFile)) + stdout, stderr, err := execReportInfo(pluginConfig.ExecutablePath, restoreDataPluginCommand, pluginConfigPath, reportFile) + if stderr != "" { + gplog.Error("%s", stderr) + } + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupReport(backupData.Timestamp, err)) + return err + } + // Display the report. + fmt.Println(stdout) + } + return nil +} + +func reportInfoDBLocal(backupName, backupDir string, hDB *sql.DB) error { + backupData, err := gpbckpconfig.GetBackupDataDB(backupName, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupInfo(backupName, err)) + return err + } + err = reportInfoFileLocalFunc(backupData, backupDir) + if err != nil { + return err + } + return nil +} + +func reportInfoFileLocalFunc(backupData *history.BackupConfig, backupDir string) error { + // Include local backup. 
+ canGetReport, err := checkBackupCanBeUsed(false, false, backupData) + if err != nil { + return err + } + if canGetReport { + timestamp := backupData.Timestamp + bckpDir, segPrefix, _, err := getBackupMasterDir(backupDir, backupData.BackupDir, backupData.DatabaseName) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupPath("backup directory", timestamp, err)) + return err + } + gplog.Debug("%s", textmsg.InfoTextBackupDirPath(bckpDir)) + gplog.Debug("%s", textmsg.InfoTextSegmentPrefix(segPrefix)) + reportFile := gpbckpconfig.ReportFilePath(bckpDir, timestamp) + // Sanitize the file path + reportFile = filepath.Clean(reportFile) + gplog.Debug("%s", textmsg.InfoTextCommandExecution("read file", reportFile)) + content, err := os.ReadFile(reportFile) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupReport(backupData.Timestamp, err)) + return err + } + fmt.Println(string(content)) + } + return nil +} + +func execReportInfo(executablePath, reportInfoPluginCommand, pluginConfigFile, file string) (string, string, error) { + cmd := execCommand(executablePath, reportInfoPluginCommand, pluginConfigFile, file) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + return stdout.String(), stderr.String(), err +} diff --git a/gpbackman/cmd/root.go b/gpbackman/cmd/root.go new file mode 100644 index 00000000..15f59f48 --- /dev/null +++ b/gpbackman/cmd/root.go @@ -0,0 +1,98 @@ +package cmd + +import ( + "fmt" + + "github.com/apache/cloudberry-backup/gpbackman/gpbckpconfig" + "github.com/apache/cloudberry-backup/gpbackman/textmsg" + "github.com/apache/cloudberry-go-libs/gplog" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +var version string + +// Flags for the gpbackman command (rootCmd) +var ( + rootHistoryDB string + rootLogFile string + rootLogLevelConsole string + rootLogLevelFile string +) + +var rootCmd = &cobra.Command{ + Use: commandName, + Short: "gpBackMan - utility for 
managing backups created by gpbackup", + Args: cobra.NoArgs, +} + +func init() { + rootCmd.PersistentFlags().StringVar( + &rootHistoryDB, + historyDBFlagName, + "", + "full path to the gpbackup_history.db file", + ) + rootCmd.PersistentFlags().StringVar( + &rootLogFile, + logFileFlagName, + "", + "full path to log file directory, if not specified, the log file will be created in the $HOME/gpAdminLogs directory", + ) + rootCmd.PersistentFlags().StringVar( + &rootLogLevelConsole, + logLevelConsoleFlagName, + "info", + "level for console logging (error, info, debug, verbose)", + ) + rootCmd.PersistentFlags().StringVar( + &rootLogLevelFile, + logLevelFileFlagName, + "info", + "level for file logging (error, info, debug, verbose)", + ) +} + +func doInit() { + rootCmd.Version = version + // If log-file flag is specified the log file will be created in the specified directory + gplog.InitializeLogging(commandName, rootLogFile) +} + +func getVersion() string { + return rootCmd.Version +} + +// These flag checks are applied for all commands: +func doRootFlagValidation(flags *pflag.FlagSet, checkFileExists bool) { + var err error + // If history-db flag is specified and full path. + // The existence of the file is checked by condition from each specific command. + // Not all commands require a history db file to exist. + if flags.Changed(historyDBFlagName) { + err = gpbckpconfig.CheckFullPath(rootHistoryDB, checkFileExists) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(rootHistoryDB, historyDBFlagName, err)) + execOSExit(exitErrorCode) + } + } + // Check, that the log level is correct. 
+ err = setLogLevelConsole(rootLogLevelConsole) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(rootLogLevelConsole, logLevelConsoleFlagName, err)) + execOSExit(exitErrorCode) + } + err = setLogLevelFile(rootLogLevelFile) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableValidateFlag(rootLogLevelFile, logLevelFileFlagName, err)) + execOSExit(exitErrorCode) + } +} + +func Execute() { + doInit() + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + execOSExit(exitErrorCode) + } +} diff --git a/gpbackman/cmd/wrappers.go b/gpbackman/cmd/wrappers.go new file mode 100644 index 00000000..48549b51 --- /dev/null +++ b/gpbackman/cmd/wrappers.go @@ -0,0 +1,239 @@ +package cmd + +import ( + "database/sql" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/apache/cloudberry-go-libs/gplog" + "github.com/spf13/pflag" + + "github.com/apache/cloudberry-backup/gpbackman/gpbckpconfig" + "github.com/apache/cloudberry-backup/gpbackman/textmsg" + "github.com/apache/cloudberry-backup/history" +) + +var execOSExit = os.Exit + +func logHeadersDebug() { + gplog.Debug("Start %s version %s", commandName, getVersion()) + gplog.Debug("Use console log level: %s", rootLogLevelConsole) + gplog.Debug("Use file log level: %s", rootLogLevelFile) + gplog.Debug("%s command: %s", commandName, os.Args) +} + +// Sets the log levels for the console and file loggers. +// Uppercase or lowercase letters are accepted. +// If an incorrect value is specified, an error is returned. +func setLogLevelConsole(level string) error { + switch strings.ToLower(level) { + case "info": + gplog.SetVerbosity(gplog.LOGINFO) + case "error": + gplog.SetVerbosity(gplog.LOGERROR) + case "debug": + gplog.SetVerbosity(gplog.LOGDEBUG) + case "verbose": + gplog.SetVerbosity(gplog.LOGVERBOSE) + default: + return textmsg.ErrorInvalidValueError() + } + return nil +} + +// Sets the log levels for the console and file loggers. +// Uppercase or lowercase letters are accepted. 
+// If an incorrect value is specified, an error is returned. +func setLogLevelFile(level string) error { + switch strings.ToLower(level) { + case "info": + gplog.SetLogFileVerbosity(gplog.LOGINFO) + case "error": + gplog.SetLogFileVerbosity(gplog.LOGERROR) + case "debug": + gplog.SetLogFileVerbosity(gplog.LOGDEBUG) + case "verbose": + gplog.SetLogFileVerbosity(gplog.LOGVERBOSE) + default: + return textmsg.ErrorInvalidValueError() + } + return nil +} + +func getHistoryDBPath(historyDBPath string) string { + var historyDBName = historyDBNameConst + if historyDBPath != "" { + return historyDBPath + } + return historyDBName +} + +func checkCompatibleFlags(flags *pflag.FlagSet, flagNames ...string) error { + n := 0 + for _, name := range flagNames { + if flags.Changed(name) { + n++ + } + } + if n > 1 { + return textmsg.ErrorIncompatibleFlagsError() + } + return nil +} + +func formatBackupDuration(value float64) string { + hours := int(value / 3600) + minutes := (int(value) % 3600) / 60 + seconds := int(value) % 60 + return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds) +} + +// The backup can be used in one of the cases for local and plugin backups: +// - backup is active +// - backup is not active, but the --force flag is set. +// Returns: +// - true, if backup can be used; +// - false, if backup can't be used. +// Errors and warnings will also returned and logged. 
+func checkBackupCanBeUsed(deleteForce, skipLocalBackup bool, backupData *history.BackupConfig) (bool, error) { + result := false + err := checkLocalBackupStatus(skipLocalBackup, gpbckpconfig.IsLocal(backupData)) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableWorkBackup(backupData.Timestamp, err)) + return result, err + } + if gpbckpconfig.IsInProgress(backupData) && !deleteForce { + gplog.Error("%s", textmsg.InfoTextBackupStatus(backupData.Timestamp, backupData.Status)) + return result, nil + } + backupDateDeleted, errDateDeleted := gpbckpconfig.GetBackupDateDeleted(backupData) + if errDateDeleted != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupValue("date deletion", backupData.Timestamp, errDateDeleted)) + } + // If the backup date deletion has invalid value, try to delete the backup. + if gpbckpconfig.IsBackupActive(backupDateDeleted) || errDateDeleted != nil { + result = true + } else { + if backupDateDeleted == gpbckpconfig.DateDeletedInProgress { + // We do not return the error here, + // because it is necessary to leave the possibility of starting the process + // of deleting backups that are stuck in the "In Progress" status using the --force flag. + gplog.Error("%s", textmsg.ErrorTextBackupDeleteInProgress(backupData.Timestamp, textmsg.ErrorBackupDeleteInProgressError())) + } else { + gplog.Debug("%s", textmsg.InfoTextBackupAlreadyDeleted(backupData.Timestamp)) + } + } + // If flag --force is set. + if deleteForce { + result = true + } + return result, nil +} + +// Check that specified backup type is supported. +func checkBackupType(backupType string) error { + var validVType = map[string]bool{ + gpbckpconfig.BackupTypeFull: true, + gpbckpconfig.BackupTypeIncremental: true, + gpbckpconfig.BackupTypeMetadataOnly: true, + gpbckpconfig.BackupTypeDataOnly: true, + } + if !validVType[backupType] { + return textmsg.ErrorInvalidValueError() + } + return nil +} + +// Check skip flag and local backup status. 
+// SkipLocalBackup - true, local backup - true, returns "is a local backup" error. +// SkipLocalBackup - false,local backup - false, returns "is not a local backup" error. +func checkLocalBackupStatus(skipLocalBackup, isLocalBackup bool) error { + if skipLocalBackup && isLocalBackup { + return textmsg.ErrorBackupLocalStorageError() + } + if !skipLocalBackup && !isLocalBackup { + return textmsg.ErrorBackupNotLocalStorageError() + } + return nil +} + +func getBackupMasterDir(backupDir, backupDataBackupDir, backupDataDBName string) (string, string, bool, error) { + if backupDir != "" { + return gpbckpconfig.CheckMasterBackupDir(backupDir) + } + if backupDataBackupDir != "" { + return gpbckpconfig.CheckMasterBackupDir(backupDataBackupDir) + } + // Try to get the backup directory from the cluster configuration. + // If the script executed not on the master host, the backup directory will not be found. + // And we return "value not set" error. + backupDirClusterInfo := getBackupMasterDirClusterInfo(backupDataDBName) + if backupDirClusterInfo != "" { + return backupDirClusterInfo, gpbckpconfig.GetSegPrefix(filepath.Join(backupDirClusterInfo, "backups")), false, nil + } + return "", "", false, textmsg.ErrorValidationValue() +} + +func getBackupSegmentDir(backupDir, backupDataBackupDir, backupDataDir, segPrefix, segID string, isSingleBackupDir bool) (string, error) { + if backupDir != "" { + return checkSingleBackupDir(backupDir, segPrefix, segID, isSingleBackupDir), nil + } + if backupDataBackupDir != "" { + return checkSingleBackupDir(backupDataBackupDir, segPrefix, segID, isSingleBackupDir), nil + } + if backupDataDir != "" { + return backupDataDir, nil + } + return "", textmsg.ErrorValidationValue() +} + +func checkSingleBackupDir(backupDir, segPrefix, segID string, isSingleBackupDir bool) string { + if isSingleBackupDir { + return backupDir + } + return filepath.Join(backupDir, fmt.Sprintf("%s%s", segPrefix, segID)) +} + +func getBackupMasterDirClusterInfo(dbName 
string) string { + db, err := gpbckpconfig.NewClusterLocalClusterConn(dbName) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableConnectLocalCluster(err)) + return "" + } + defer db.Close() + sqlQuery := "SELECT datadir FROM gp_segment_configuration WHERE content = -1 AND role = 'p';" + queryResult, err := gpbckpconfig.ExecuteQueryLocalClusterConn[string](db, sqlQuery) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupDirLocalClusterConn(err)) + return "" + } + gplog.Debug("Master data directory: %s", queryResult) + return queryResult +} + +func getSegmentConfigurationClusterInfo(dbName string) ([]gpbckpconfig.SegmentConfig, error) { + queryResult := make([]gpbckpconfig.SegmentConfig, 0) + db, err := gpbckpconfig.NewClusterLocalClusterConn(dbName) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableConnectLocalCluster(err)) + return queryResult, err + } + defer db.Close() + sqlQuery := "SELECT content as contentid, hostname, datadir FROM gp_segment_configuration WHERE role = 'p' and content != -1 ORDER BY content;" + queryResult, err = gpbckpconfig.ExecuteQueryLocalClusterConn[[]gpbckpconfig.SegmentConfig](db, sqlQuery) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableGetBackupDirLocalClusterConn(err)) + return queryResult, err + } + return queryResult, nil +} + +func handleErrorDB(backupName, errorMessage, backupStatus string, hDB *sql.DB) { + gplog.Error("%s", errorMessage) + err := gpbckpconfig.UpdateDeleteStatus(backupName, backupStatus, hDB) + if err != nil { + gplog.Error("%s", textmsg.ErrorTextUnableSetBackupStatus(backupStatus, backupName, err)) + } +} diff --git a/gpbackman/cmd/wrappers_test.go b/gpbackman/cmd/wrappers_test.go new file mode 100644 index 00000000..550a91d9 --- /dev/null +++ b/gpbackman/cmd/wrappers_test.go @@ -0,0 +1,364 @@ +package cmd + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/apache/cloudberry-backup/gpbackman/gpbckpconfig" + 
"github.com/apache/cloudberry-backup/history" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/spf13/pflag" +) + +var _ = Describe("wrappers tests", func() { + Describe("getHistoryDBPath", func() { + It("returns default path when input is empty", func() { + Expect(getHistoryDBPath("")).To(Equal(historyDBNameConst)) + }) + + It("returns input path when not empty", func() { + Expect(getHistoryDBPath("path/to/" + historyDBNameConst)).To(Equal("path/to/" + historyDBNameConst)) + }) + }) + + Describe("formatBackupDuration", func() { + It("formats duration correctly", func() { + tests := []struct { + name string + value float64 + want string + }{ + {"01:00:00", 3600, "01:00:00"}, + {"01:01:01", 3661, "01:01:01"}, + {"00:00:00", 0, "00:00:00"}, + } + for _, tt := range tests { + Expect(formatBackupDuration(tt.value)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("checkCompatibleFlags", func() { + It("does not return error when no flags changed", func() { + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + Expect(checkCompatibleFlags(flags)).To(Succeed()) + }) + + It("does not return error when one flag changed", func() { + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("flag1", "", "") + flags.Set("flag1", "") + Expect(checkCompatibleFlags(flags, "flag1")).To(Succeed()) + }) + + It("returns error when multiple flags changed", func() { + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("flag1", "", "") + flags.String("flag2", "", "") + flags.Set("flag1", "") + flags.Set("flag2", "") + err := checkCompatibleFlags(flags, "flag1", "flag2") + Expect(err).To(HaveOccurred()) + }) + }) + + Describe("checkBackupCanBeUsed", func() { + It("returns correct result for various backup configurations", func() { + tests := []struct { + name string + deleteForce bool + skipLocalBackup bool + backupConfig history.BackupConfig + want bool + wantErr bool + }{ + { + name: "successful backup with plugin 
and force, skipLocalBackup true", + deleteForce: true, + skipLocalBackup: true, + backupConfig: history.BackupConfig{ + Status: history.BackupStatusSucceed, + Plugin: gpbckpconfig.BackupS3Plugin, + }, + want: true, + }, + { + name: "successful backup with plugin and without force", + skipLocalBackup: true, + backupConfig: history.BackupConfig{ + Status: history.BackupStatusSucceed, + Plugin: gpbckpconfig.BackupS3Plugin, + }, + want: true, + }, + { + name: "failed backup with plugin and force", + deleteForce: true, + skipLocalBackup: true, + backupConfig: history.BackupConfig{ + Status: history.BackupStatusFailed, + Plugin: gpbckpconfig.BackupS3Plugin, + }, + want: true, + }, + { + name: "failed backup with plugin and without force", + skipLocalBackup: true, + backupConfig: history.BackupConfig{ + Status: history.BackupStatusFailed, + Plugin: gpbckpconfig.BackupS3Plugin, + }, + want: true, + }, + { + name: "successful backup without plugin and force", + deleteForce: true, + backupConfig: history.BackupConfig{ + Status: history.BackupStatusSucceed, + }, + want: true, + }, + { + name: "successful backup without plugin and without force", + backupConfig: history.BackupConfig{ + Status: history.BackupStatusSucceed, + }, + want: true, + }, + { + name: "successful deleted backup with plugin and force", + deleteForce: true, + skipLocalBackup: true, + backupConfig: history.BackupConfig{ + Status: history.BackupStatusSucceed, + Plugin: gpbckpconfig.BackupS3Plugin, + DateDeleted: "20240113210000", + }, + want: true, + }, + { + name: "successful deleted backup with plugin and without force", + skipLocalBackup: true, + backupConfig: history.BackupConfig{ + Status: history.BackupStatusSucceed, + Plugin: gpbckpconfig.BackupS3Plugin, + DateDeleted: "20240113210000", + }, + want: false, + }, + { + name: "invalid backup status with plugin and without force", + skipLocalBackup: true, + backupConfig: history.BackupConfig{ + Status: "some_status", + Plugin: gpbckpconfig.BackupS3Plugin, 
+ }, + want: true, + }, + { + name: "successful backup with plugin with deletion in progress and force", + deleteForce: true, + skipLocalBackup: true, + backupConfig: history.BackupConfig{ + Status: history.BackupStatusSucceed, + Plugin: gpbckpconfig.BackupS3Plugin, + DateDeleted: gpbckpconfig.DateDeletedInProgress, + }, + want: true, + }, + { + name: "successful backup with plugin with deletion in progress and without force", + skipLocalBackup: true, + backupConfig: history.BackupConfig{ + Status: history.BackupStatusSucceed, + Plugin: gpbckpconfig.BackupS3Plugin, + DateDeleted: gpbckpconfig.DateDeletedInProgress, + }, + want: false, + }, + { + name: "successful backup with plugin with invalid deletion date and without force", + skipLocalBackup: true, + backupConfig: history.BackupConfig{ + Status: history.BackupStatusSucceed, + Plugin: gpbckpconfig.BackupS3Plugin, + DateDeleted: "some date", + }, + want: true, + }, + { + name: "successful backup with plugin with invalid skipLocalBackup variable", + backupConfig: history.BackupConfig{ + Status: history.BackupStatusSucceed, + Plugin: gpbckpconfig.BackupS3Plugin, + DateDeleted: "some date", + }, + wantErr: true, + }, + { + name: "successful backup without plugin with invalid skipLocalBackup variable", + skipLocalBackup: true, + backupConfig: history.BackupConfig{ + Status: history.BackupStatusSucceed, + DateDeleted: "some date", + }, + wantErr: true, + }, + } + for _, tt := range tests { + got, err := checkBackupCanBeUsed(tt.deleteForce, tt.skipLocalBackup, &tt.backupConfig) + if tt.wantErr { + Expect(err).To(HaveOccurred(), tt.name) + } else { + Expect(err).ToNot(HaveOccurred(), tt.name) + Expect(got).To(Equal(tt.want), tt.name) + } + } + }) + }) + + Describe("checkBackupType", func() { + It("accepts valid backup type", func() { + Expect(checkBackupType(gpbckpconfig.BackupTypeFull)).To(Succeed()) + }) + + It("rejects invalid backup type", func() { + Expect(checkBackupType("InvalidType")).To(HaveOccurred()) + }) + 
}) + + Describe("getBackupMasterDir", func() { + It("returns correct values for various backup dirs", func() { + tempDir, err := os.MkdirTemp("", "gpbackman-test-") + Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tempDir) + + tests := []struct { + name string + testDir string + backupDir string + backupDataBackupDir string + backupDataDBName string + wantBackupMasterDir string + wantSegPrefix string + wantIsSingleBackupDir bool + wantErr bool + }{ + { + name: "backupDir is set and valid", + testDir: filepath.Join(tempDir, "segPrefix", "segment-1", "backups"), + backupDir: filepath.Join(tempDir, "segPrefix"), + wantBackupMasterDir: filepath.Join(tempDir, "segPrefix", "segment-1"), + wantSegPrefix: "segment", + wantIsSingleBackupDir: false, + }, + { + name: "backupDataBackupDir is set and valid", + testDir: filepath.Join(tempDir, "segPrefix2", "segment-1", "backups"), + backupDataBackupDir: filepath.Join(tempDir, "segPrefix2"), + wantBackupMasterDir: filepath.Join(tempDir, "segPrefix2", "segment-1"), + wantSegPrefix: "segment", + wantIsSingleBackupDir: false, + }, + } + for _, tt := range tests { + err := os.MkdirAll(tt.testDir, 0o755) + Expect(err).ToNot(HaveOccurred()) + gotBackupMasterDir, gotSegPrefix, gotIsSingleBackupDir, err := getBackupMasterDir(tt.backupDir, tt.backupDataBackupDir, tt.backupDataDBName) + if tt.wantErr { + Expect(err).To(HaveOccurred(), tt.name) + } else { + Expect(err).ToNot(HaveOccurred(), tt.name) + Expect(gotBackupMasterDir).To(Equal(tt.wantBackupMasterDir), tt.name) + Expect(gotSegPrefix).To(Equal(tt.wantSegPrefix), tt.name) + Expect(gotIsSingleBackupDir).To(Equal(tt.wantIsSingleBackupDir), tt.name) + } + } + }) + }) + + Describe("checkSingleBackupDir", func() { + It("returns backupDir when isSingleBackupDir is true", func() { + got := checkSingleBackupDir("/path/to/backup", "seg", "1", true) + Expect(got).To(Equal("/path/to/backup")) + }) + + It("returns composed path when isSingleBackupDir is false", func() { + got := 
checkSingleBackupDir("/path/to/backup", "seg", "1", false) + Expect(got).To(Equal(filepath.Join("/path/to/backup", fmt.Sprintf("%s%s", "seg", "1")))) + }) + }) + + Describe("getBackupSegmentDir", func() { + It("returns correct segment dir for various inputs", func() { + tests := []struct { + name string + backupDir string + backupDataBackupDir string + backupDataDir string + isSingleBackupDir bool + want string + wantErr bool + }{ + { + name: "backupDir is not empty", + backupDir: "/path/to/backupDir", + isSingleBackupDir: true, + want: "/path/to/backupDir", + }, + { + name: "backupDataBackupDir is not empty", + backupDataBackupDir: "/path/to/backupDataBackupDir", + isSingleBackupDir: true, + want: "/path/to/backupDataBackupDir", + }, + { + name: "backupDataDir is not empty", + backupDataDir: "/path/to/backupDataDir", + isSingleBackupDir: true, + want: "/path/to/backupDataDir", + }, + { + name: "all backup directories are empty", + isSingleBackupDir: true, + wantErr: true, + }, + } + for _, tt := range tests { + got, err := getBackupSegmentDir(tt.backupDir, tt.backupDataBackupDir, tt.backupDataDir, "seg", "1", tt.isSingleBackupDir) + if tt.wantErr { + Expect(err).To(HaveOccurred(), tt.name) + } else { + Expect(err).ToNot(HaveOccurred(), tt.name) + Expect(got).To(Equal(tt.want), tt.name) + } + } + }) + }) + + Describe("checkLocalBackupStatus", func() { + It("returns correct result for various inputs", func() { + tests := []struct { + name string + skipLocalBackup bool + isLocalBackup bool + wantErr bool + }{ + {"skip local and local backup", true, true, true}, + {"skip local and plugin backup", true, false, false}, + {"do not skip local and local backup", false, true, false}, + {"do not skip local and plugin backup", false, false, true}, + } + for _, tt := range tests { + err := checkLocalBackupStatus(tt.skipLocalBackup, tt.isLocalBackup) + if tt.wantErr { + Expect(err).To(HaveOccurred(), tt.name) + } else { + Expect(err).ToNot(HaveOccurred(), tt.name) + } + } + }) 
+ }) +}) diff --git a/gpbackman/gpbckpconfig/cluster.go b/gpbackman/gpbckpconfig/cluster.go new file mode 100644 index 00000000..ed59370f --- /dev/null +++ b/gpbackman/gpbckpconfig/cluster.go @@ -0,0 +1,79 @@ +package gpbckpconfig + +import ( + "fmt" + "strconv" + + "github.com/apache/cloudberry-backup/gpbackman/textmsg" + "github.com/apache/cloudberry-go-libs/operating" + "github.com/jmoiron/sqlx" + _ "github.com/lib/pq" +) + +type SegmentConfig struct { + ContentID string + Hostname string + DataDir string +} + +// NewClusterLocalClusterConn creates a new connection to the local postgres database. +// Returns an error if the connection could not be established. +func NewClusterLocalClusterConn(dbName string) (*sqlx.DB, error) { + if dbName == "" { + return nil, textmsg.ErrorEmptyDatabase() + } + username := operating.System.Getenv("PGUSER") + if username == "" { + currentUser, _ := operating.System.CurrentUser() + username = currentUser.Username + } + host := operating.System.Getenv("PGHOST") + if host == "" { + host, _ = operating.System.Hostname() + } + port, err := strconv.Atoi(operating.System.Getenv("PGPORT")) + if err != nil { + port = 5432 + } + connStr := fmt.Sprintf("postgres://%s@%s:%d/%s?sslmode=disable&connect_timeout=60", username, host, port, dbName) + return sqlx.Connect("postgres", connStr) +} + +// ExecuteQueryLocalClusterConn executes a query on the local cluster connection and returns the result. +// The function is generic and can handle different types of results based on the type parameter T. +// +// Parameters: +// - conn: A pointer to the sqlx.DB connection object. +// - query: A string containing the SQL query to be executed. +// +// Returns: +// - T: The result of the query, which can be of any type specified by the caller. +// - error: An error object if the query execution fails or if the type is unsupported. +// +// The function supports the following types for T: +// - string: The result will be a single string value. 
+// - []SegmentConfig: The result will be a slice of SegmentConfig structs. +// +// If the type T is not supported, the function returns an error indicating the unsupported type. +func ExecuteQueryLocalClusterConn[T any](conn *sqlx.DB, query string) (T, error) { + var result T + switch any(result).(type) { + case string: + var data string + err := conn.Get(&data, query) + if err != nil { + return result, err + } + result = any(data).(T) + case []SegmentConfig: + var segConfigs []SegmentConfig + err := conn.Select(&segConfigs, query) + if err != nil { + return result, err + } + result = any(segConfigs).(T) + default: + return result, fmt.Errorf("unsupported type") + } + return result, nil +} diff --git a/gpbackman/gpbckpconfig/gpbckpconfig_suite_test.go b/gpbackman/gpbckpconfig/gpbckpconfig_suite_test.go new file mode 100644 index 00000000..d8602978 --- /dev/null +++ b/gpbackman/gpbckpconfig/gpbckpconfig_suite_test.go @@ -0,0 +1,13 @@ +package gpbckpconfig + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestGpbckpconfig(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Gpbckpconfig Suite") +} diff --git a/gpbackman/gpbckpconfig/helpers.go b/gpbackman/gpbckpconfig/helpers.go new file mode 100644 index 00000000..3336c026 --- /dev/null +++ b/gpbackman/gpbckpconfig/helpers.go @@ -0,0 +1,235 @@ +package gpbckpconfig + +import ( + "errors" + "strings" + "time" + + "github.com/apache/cloudberry-backup/history" + "github.com/apache/cloudberry-backup/utils" +) + +// GetBackupType Get backup type. +// The value is calculated, based on: +// - full - contains user data, all global and local metadata for the database; +// - incremental – contains user data, all global and local metadata changed since a previous full backup; +// - metadata-only – contains only global and local metadata for the database; +// - data-only – contains only user data from the database. +// +// In all other cases, an error is returned. 
+func GetBackupType(backupConfig *history.BackupConfig) (string, error) { + // For gpbackup you cannot combine --data-only or --metadata-only with --incremental (see docs). + // So these options cannot be set at the same time. + // If not one of the --data-only, --metadata-only and --incremental flags is not set, + // the full value is returned. + // But if there are no tables in backup set contain data, + // the metadata-only value is returned. + // See https://github.com/woblerr/gpbackup/blob/b061a47b673238439442340e66ca57d896edacd5/backup/backup.go#L127-L129 + switch { + case !backupConfig.Incremental && !backupConfig.DataOnly && !backupConfig.MetadataOnly: + return BackupTypeFull, nil + case backupConfig.Incremental && !backupConfig.DataOnly && !backupConfig.MetadataOnly: + return BackupTypeIncremental, nil + case backupConfig.DataOnly && !backupConfig.Incremental && !backupConfig.MetadataOnly: + return BackupTypeDataOnly, nil + // If only metadata-only value. + // Or combination metadata-only and incremental or metadata-only and data-only. + // The case when there are no tables in backup set contain data. + case (backupConfig.MetadataOnly && !backupConfig.Incremental) || (backupConfig.MetadataOnly && !backupConfig.DataOnly): + return BackupTypeMetadataOnly, nil + default: + return "", errors.New("backup type does not match any of the available values") + } +} + +// GetObjectFilteringInfo Get object filtering information. +// The value is calculated, base on whether at least one of the flags was specified: +// - include-schema – at least one "--include-schema" option was specified; +// - exclude-schema – at least one "--exclude-schema" option was specified; +// - include-table – at least one "--include-table" option was specified; +// - exclude-table – at least one "--exclude-table" option was specified; +// - "" - no options was specified. +// +// For gpbackup only one type of filters can be used (see docs). +// So these options cannot be set at the same time. 
+// If not one of these flags is not set, +// the "" value is returned. +// In all other cases, an error is returned. +func GetObjectFilteringInfo(backupConfig *history.BackupConfig) (string, error) { + switch { + case backupConfig.IncludeSchemaFiltered && + !backupConfig.ExcludeSchemaFiltered && + !backupConfig.IncludeTableFiltered && + !backupConfig.ExcludeTableFiltered: + return objectFilteringIncludeSchema, nil + case backupConfig.ExcludeSchemaFiltered && + !backupConfig.IncludeSchemaFiltered && + !backupConfig.IncludeTableFiltered && + !backupConfig.ExcludeTableFiltered: + return objectFilteringExcludeSchema, nil + case backupConfig.IncludeTableFiltered && + !backupConfig.IncludeSchemaFiltered && + !backupConfig.ExcludeSchemaFiltered && + !backupConfig.ExcludeTableFiltered: + return objectFilteringIncludeTable, nil + case backupConfig.ExcludeTableFiltered && + !backupConfig.IncludeSchemaFiltered && + !backupConfig.ExcludeSchemaFiltered && + !backupConfig.IncludeTableFiltered: + return objectFilteringExcludeTable, nil + case !backupConfig.ExcludeTableFiltered && + !backupConfig.IncludeSchemaFiltered && + !backupConfig.ExcludeSchemaFiltered && + !backupConfig.IncludeTableFiltered: + return "", nil + default: + return "", errors.New("backup filtering type does not match any of the available values") + } +} + +// GetObjectFilteringDetails returns a comma-separated string with object filtering details +// depending on the active filtering type. If no filtering is active, it returns an empty string. 
+func GetObjectFilteringDetails(backupConfig *history.BackupConfig) string { + filter, _ := GetObjectFilteringInfo(backupConfig) + switch filter { + case objectFilteringIncludeTable: + return strings.Join(backupConfig.IncludeRelations, ", ") + case objectFilteringExcludeTable: + return strings.Join(backupConfig.ExcludeRelations, ", ") + case objectFilteringIncludeSchema: + return strings.Join(backupConfig.IncludeSchemas, ", ") + case objectFilteringExcludeSchema: + return strings.Join(backupConfig.ExcludeSchemas, ", ") + default: + return "" + } +} + +// GetBackupDate Get backup date. +// If an error occurs when parsing the date, the empty string and error are returned. +func GetBackupDate(backupConfig *history.BackupConfig) (string, error) { + var date string + t, err := time.Parse(Layout, backupConfig.Timestamp) + if err != nil { + return date, err + } + date = t.Format(DateFormat) + return date, nil +} + +// GetBackupDuration Get backup duration in seconds. +// If an error occurs when parsing the date, the zero duration and error are returned. +func GetBackupDuration(backupConfig *history.BackupConfig) (float64, error) { + var zeroDuration float64 + startTime, err := time.Parse(Layout, backupConfig.Timestamp) + if err != nil { + return zeroDuration, err + } + endTime, err := time.Parse(Layout, backupConfig.EndTime) + if err != nil { + return zeroDuration, err + } + return endTime.Sub(startTime).Seconds(), nil +} + +// GetBackupDateDeleted Get backup deletion date or backup deletion status. +// The possible values are: +// - In progress - if the value is set to "In progress"; +// - Plugin Backup Delete Failed - if the value is set to "Plugin Backup Delete Failed"; +// - Local Delete Failed - if the value is set to "Local Delete Failed"; +// - "" - if backup is active; +// - date in format "Mon Jan 02 2006 15:04:05" - if backup is deleted and deletion timestamp is set. +// +// In all other cases, an error is returned. 
+func GetBackupDateDeleted(backupConfig *history.BackupConfig) (string, error) { + switch backupConfig.DateDeleted { + case "", DateDeletedInProgress, DateDeletedPluginFailed, DateDeletedLocalFailed: + return backupConfig.DateDeleted, nil + default: + t, err := time.Parse(Layout, backupConfig.DateDeleted) + if err != nil { + return backupConfig.DateDeleted, err + } + return t.Format(DateFormat), nil + } +} + +// IsSuccess Check backup status. +// Returns: +// - true - if backup is successful, +// - false - false if backup is not successful or in progress. +// +// In all other cases, an error is returned. +func IsSuccess(backupConfig *history.BackupConfig) (bool, error) { + switch backupConfig.Status { + case history.BackupStatusSucceed: + return true, nil + case history.BackupStatusFailed, history.BackupStatusInProgress: + return false, nil + default: + return false, errors.New("backup status does not match any of the available values") + } +} + +// IsLocal Check if the backup in local or in plugin storage. +// Returns: +// - true - if the backup in local storage (plugin field is empty); +// - false - if the backup in plugin storage (plugin field is not empty). +func IsLocal(backupConfig *history.BackupConfig) bool { + return backupConfig.Plugin == "" +} + +// IsInProgress Check if the backup is in progress. +func IsInProgress(backupConfig *history.BackupConfig) bool { + return backupConfig.Status == history.BackupStatusInProgress +} + +// GetReportFilePathPlugin Return path to report file name for specific plugin. +// If custom report path is set, it is returned. +// Otherwise, the path from plugin is returned. +func GetReportFilePathPlugin(backupConfig *history.BackupConfig, customReportPath string, pluginOptions map[string]string) (string, error) { + if customReportPath != "" { + return backupPluginCustomReportPath(backupConfig.Timestamp, customReportPath), nil + } + // In future another plugins may be added. 
+ switch backupConfig.Plugin { + case BackupS3Plugin: + return backupS3PluginReportPath(backupConfig.Timestamp, pluginOptions) + default: + // nothing to do + } + return "", errors.New("the path to the report is not specified") +} + +// CheckObjectFilteringExists checks if the object filtering exists in the backup. +// +// This function is responsible for determining whether table or schema filtering exists in the backup, and if so, whether the specified filter type is being used. +// Returns: +// - true - if table or schema filtering exists in the backup or no filters are specified; +// - false - if table or schema filtering does not exists in the backup. +func CheckObjectFilteringExists(backupConfig *history.BackupConfig, tableFilter, schemaFilter, objectFilter string, excludeFilter bool) bool { + switch { + case tableFilter != "" && !excludeFilter: + if objectFilter == objectFilteringIncludeTable { + return utils.Exists(backupConfig.IncludeRelations, tableFilter) + } + return false + case tableFilter != "" && excludeFilter: + if objectFilter == objectFilteringExcludeTable { + return utils.Exists(backupConfig.ExcludeRelations, tableFilter) + } + return false + case schemaFilter != "" && !excludeFilter: + if objectFilter == objectFilteringIncludeSchema { + return utils.Exists(backupConfig.IncludeSchemas, schemaFilter) + } + return false + case schemaFilter != "" && excludeFilter: + if objectFilter == objectFilteringExcludeSchema { + return utils.Exists(backupConfig.ExcludeSchemas, schemaFilter) + } + return false + default: + return true + } +} diff --git a/gpbackman/gpbckpconfig/helpers_test.go b/gpbackman/gpbckpconfig/helpers_test.go new file mode 100644 index 00000000..8065daa1 --- /dev/null +++ b/gpbackman/gpbckpconfig/helpers_test.go @@ -0,0 +1,558 @@ +package gpbckpconfig + +import ( + "github.com/apache/cloudberry-backup/history" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("helpers tests", func() { + Describe("GetBackupType", func() { + It("returns correct backup type", func() { + tests := []struct { + name string + config history.BackupConfig + want string + wantErr bool + }{ + { + name: "incremental backup", + config: history.BackupConfig{Incremental: true}, + want: BackupTypeIncremental, + wantErr: false, + }, + { + name: "data-only backup", + config: history.BackupConfig{DataOnly: true}, + want: BackupTypeDataOnly, + wantErr: false, + }, + { + name: "metadata-only backup", + config: history.BackupConfig{MetadataOnly: true}, + want: BackupTypeMetadataOnly, + wantErr: false, + }, + { + name: "metadata-only when data-only also set", + config: history.BackupConfig{DataOnly: true, MetadataOnly: true}, + want: BackupTypeMetadataOnly, + wantErr: false, + }, + { + name: "metadata-only when incremental also set", + config: history.BackupConfig{Incremental: true, MetadataOnly: true}, + want: BackupTypeMetadataOnly, + wantErr: false, + }, + { + name: "full backup", + config: history.BackupConfig{ + Incremental: false, + DataOnly: false, + MetadataOnly: false, + }, + want: BackupTypeFull, + wantErr: false, + }, + { + name: "invalid backup case 1", + config: history.BackupConfig{Incremental: true, DataOnly: true}, + want: "", + wantErr: true, + }, + { + name: "invalid backup case 2", + config: history.BackupConfig{Incremental: true, DataOnly: true, MetadataOnly: true}, + want: "", + wantErr: true, + }, + } + for _, tt := range tests { + cfg := tt.config + got, err := GetBackupType(&cfg) + if tt.wantErr { + Expect(err).To(HaveOccurred(), tt.name) + } else { + Expect(err).ToNot(HaveOccurred(), tt.name) + } + Expect(got).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("GetObjectFilteringInfo", func() { + It("returns correct filtering info", func() { + tests := []struct { + name string + config history.BackupConfig + want string + wantErr bool + }{ + { + name: "IncludeSchemaFiltered", + 
config: history.BackupConfig{IncludeSchemaFiltered: true}, + want: objectFilteringIncludeSchema, + }, + { + name: "ExcludeSchemaFiltered", + config: history.BackupConfig{ExcludeSchemaFiltered: true}, + want: objectFilteringExcludeSchema, + }, + { + name: "IncludeTableFiltered", + config: history.BackupConfig{IncludeTableFiltered: true}, + want: objectFilteringIncludeTable, + }, + { + name: "ExcludeTableFiltered", + config: history.BackupConfig{ExcludeTableFiltered: true}, + want: objectFilteringExcludeTable, + }, + { + name: "NoFiltering", + config: history.BackupConfig{}, + want: "", + }, + { + name: "Invalid IncludeTable and ExcludeTable", + config: history.BackupConfig{IncludeTableFiltered: true, ExcludeTableFiltered: true}, + wantErr: true, + }, + { + name: "Invalid IncludeSchema and ExcludeSchema", + config: history.BackupConfig{IncludeSchemaFiltered: true, ExcludeSchemaFiltered: true}, + wantErr: true, + }, + { + name: "Invalid IncludeSchema and IncludeTable", + config: history.BackupConfig{IncludeSchemaFiltered: true, IncludeTableFiltered: true}, + wantErr: true, + }, + { + name: "Invalid IncludeSchema and ExcludeTable", + config: history.BackupConfig{IncludeSchemaFiltered: true, ExcludeTableFiltered: true}, + wantErr: true, + }, + { + name: "Invalid ExcludeSchema and IncludeTable", + config: history.BackupConfig{ExcludeSchemaFiltered: true, IncludeTableFiltered: true}, + wantErr: true, + }, + { + name: "Invalid ExcludeSchema and ExcludeTable", + config: history.BackupConfig{ExcludeSchemaFiltered: true, ExcludeTableFiltered: true}, + wantErr: true, + }, + { + name: "Invalid IncludeSchema IncludeTable and ExcludeTable", + config: history.BackupConfig{IncludeSchemaFiltered: true, IncludeTableFiltered: true, ExcludeTableFiltered: true}, + wantErr: true, + }, + { + name: "Invalid IncludeSchema ExcludeSchema and IncludeTable", + config: history.BackupConfig{IncludeSchemaFiltered: true, ExcludeSchemaFiltered: true, IncludeTableFiltered: true}, + wantErr: true, + 
}, + { + name: "Invalid IncludeSchema ExcludeSchema and ExcludeTable", + config: history.BackupConfig{IncludeSchemaFiltered: true, ExcludeSchemaFiltered: true, ExcludeTableFiltered: true}, + wantErr: true, + }, + { + name: "Invalid all filters set", + config: history.BackupConfig{IncludeSchemaFiltered: true, ExcludeSchemaFiltered: true, IncludeTableFiltered: true, ExcludeTableFiltered: true}, + wantErr: true, + }, + } + for _, tt := range tests { + cfg := tt.config + got, err := GetObjectFilteringInfo(&cfg) + if tt.wantErr { + Expect(err).To(HaveOccurred(), tt.name) + } else { + Expect(err).ToNot(HaveOccurred(), tt.name) + Expect(got).To(Equal(tt.want), tt.name) + } + } + }) + }) + + Describe("GetObjectFilteringDetails", func() { + It("returns correct filtering details", func() { + tests := []struct { + name string + config history.BackupConfig + want string + }{ + { + name: "IncludeTable details", + config: history.BackupConfig{ + IncludeTableFiltered: true, + IncludeRelations: []string{"public.t1", "s.t2"}, + }, + want: "public.t1, s.t2", + }, + { + name: "ExcludeTable details", + config: history.BackupConfig{ + ExcludeTableFiltered: true, + ExcludeRelations: []string{"public.t3"}, + }, + want: "public.t3", + }, + { + name: "IncludeSchema details", + config: history.BackupConfig{ + IncludeSchemaFiltered: true, + IncludeSchemas: []string{"public", "sales"}, + }, + want: "public, sales", + }, + { + name: "ExcludeSchema details", + config: history.BackupConfig{ + ExcludeSchemaFiltered: true, + ExcludeSchemas: []string{"tmp"}, + }, + want: "tmp", + }, + { + name: "No filtering", + config: history.BackupConfig{}, + want: "", + }, + } + for _, tt := range tests { + cfg := tt.config + got := GetObjectFilteringDetails(&cfg) + Expect(got).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("GetBackupDate", func() { + It("parses valid timestamp", func() { + cfg := history.BackupConfig{Timestamp: "20220401102430"} + got, err := GetBackupDate(&cfg) + 
Expect(err).ToNot(HaveOccurred()) + Expect(got).To(Equal("Fri Apr 01 2022 10:24:30")) + }) + + It("returns error for invalid timestamp", func() { + cfg := history.BackupConfig{Timestamp: "invalid"} + _, err := GetBackupDate(&cfg) + Expect(err).To(HaveOccurred()) + }) + }) + + Describe("GetBackupDuration", func() { + It("calculates duration correctly", func() { + cfg := history.BackupConfig{ + Timestamp: "20220401102430", + EndTime: "20220401115502", + } + got, err := GetBackupDuration(&cfg) + Expect(err).ToNot(HaveOccurred()) + Expect(got).To(Equal(float64(5432))) + }) + + It("returns error for invalid start timestamp", func() { + cfg := history.BackupConfig{ + Timestamp: "invalid", + EndTime: "20220401115502", + } + _, err := GetBackupDuration(&cfg) + Expect(err).To(HaveOccurred()) + }) + + It("returns error for invalid end timestamp", func() { + cfg := history.BackupConfig{ + Timestamp: "20220401102430", + EndTime: "invalid", + } + _, err := GetBackupDuration(&cfg) + Expect(err).To(HaveOccurred()) + }) + }) + + Describe("GetBackupDateDeleted", func() { + It("returns correct date deleted", func() { + tests := []struct { + name string + config history.BackupConfig + want string + wantErr bool + }{ + { + name: "empty", + config: history.BackupConfig{DateDeleted: ""}, + want: "", + }, + { + name: "in progress", + config: history.BackupConfig{DateDeleted: DateDeletedInProgress}, + want: DateDeletedInProgress, + }, + { + name: "plugin backup delete failed", + config: history.BackupConfig{DateDeleted: DateDeletedPluginFailed}, + want: DateDeletedPluginFailed, + }, + { + name: "local delete failed", + config: history.BackupConfig{DateDeleted: DateDeletedLocalFailed}, + want: DateDeletedLocalFailed, + }, + { + name: "valid date", + config: history.BackupConfig{DateDeleted: "20220401102430"}, + want: "Fri Apr 01 2022 10:24:30", + }, + { + name: "invalid date", + config: history.BackupConfig{DateDeleted: "InvalidDate"}, + want: "InvalidDate", + wantErr: true, + }, + } + for 
_, tt := range tests { + cfg := tt.config + got, err := GetBackupDateDeleted(&cfg) + if tt.wantErr { + Expect(err).To(HaveOccurred(), tt.name) + } else { + Expect(err).ToNot(HaveOccurred(), tt.name) + } + Expect(got).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("IsSuccess", func() { + It("returns true for success status", func() { + cfg := history.BackupConfig{Status: history.BackupStatusSucceed} + got, err := IsSuccess(&cfg) + Expect(err).ToNot(HaveOccurred()) + Expect(got).To(BeTrue()) + }) + + It("returns false for failure status", func() { + cfg := history.BackupConfig{Status: history.BackupStatusFailed} + got, err := IsSuccess(&cfg) + Expect(err).ToNot(HaveOccurred()) + Expect(got).To(BeFalse()) + }) + + It("returns error for unknown status", func() { + cfg := history.BackupConfig{Status: "unknown"} + _, err := IsSuccess(&cfg) + Expect(err).To(HaveOccurred()) + }) + }) + + Describe("IsLocal", func() { + It("returns true when plugin is empty", func() { + cfg := history.BackupConfig{Plugin: ""} + Expect(IsLocal(&cfg)).To(BeTrue()) + }) + + It("returns false when plugin is set", func() { + cfg := history.BackupConfig{Plugin: "plugin"} + Expect(IsLocal(&cfg)).To(BeFalse()) + }) + }) + + Describe("IsInProgress", func() { + It("returns correct result for various statuses", func() { + tests := []struct { + name string + status string + want bool + }{ + {"in progress", history.BackupStatusInProgress, true}, + {"success", history.BackupStatusSucceed, false}, + {"failure", history.BackupStatusFailed, false}, + {"empty", "", false}, + {"unknown", "unknown", false}, + } + for _, tt := range tests { + cfg := history.BackupConfig{Status: tt.status} + Expect(IsInProgress(&cfg)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("GetReportFilePathPlugin", func() { + It("returns correct report path", func() { + tests := []struct { + name string + config history.BackupConfig + customReportPath string + pluginOptions map[string]string + want string + wantErr bool + 
}{ + { + name: "custom report path", + config: history.BackupConfig{ + Timestamp: "20220401102430", + Plugin: BackupS3Plugin, + }, + customReportPath: "/path/to/report", + pluginOptions: make(map[string]string), + want: "/path/to/report/gpbackup_20220401102430_report", + }, + { + name: "s3 plugin folder absent", + config: history.BackupConfig{ + Timestamp: "20220401102430", + Plugin: BackupS3Plugin, + }, + pluginOptions: map[string]string{"bucket": "bucket"}, + wantErr: true, + }, + { + name: "s3 plugin folder empty", + config: history.BackupConfig{ + Timestamp: "20220401102430", + Plugin: BackupS3Plugin, + }, + pluginOptions: map[string]string{"folder": ""}, + wantErr: true, + }, + { + name: "s3 plugin folder ok", + config: history.BackupConfig{ + Timestamp: "20220401102430", + Plugin: BackupS3Plugin, + }, + pluginOptions: map[string]string{"folder": "/path/to/report"}, + want: "/path/to/report/backups/20220401/20220401102430/gpbackup_20220401102430_report", + }, + { + name: "unknown plugin without custom report path", + config: history.BackupConfig{ + Timestamp: "20220401102430", + Plugin: "some_plugin", + }, + pluginOptions: map[string]string{"folder": "/path/to/report"}, + wantErr: true, + }, + } + for _, tt := range tests { + cfg := tt.config + got, err := GetReportFilePathPlugin(&cfg, tt.customReportPath, tt.pluginOptions) + if tt.wantErr { + Expect(err).To(HaveOccurred(), tt.name) + } else { + Expect(err).ToNot(HaveOccurred(), tt.name) + Expect(got).To(Equal(tt.want), tt.name) + } + } + }) + }) + + Describe("CheckObjectFilteringExists", func() { + It("returns correct result for various filtering scenarios", func() { + tests := []struct { + name string + tableFilter string + schemaFilter string + objectFilter string + excludeFilter bool + want bool + config history.BackupConfig + }{ + { + name: "no filters specified", + want: true, + }, + { + name: "table filter matches included table", + tableFilter: "test.table1", + objectFilter: "include-table", + want: 
true, + config: history.BackupConfig{ + IncludeRelations: []string{"test.table1", "test.table2"}, + }, + }, + { + name: "table filter does not match included table", + tableFilter: "test.table1", + objectFilter: "include-table", + want: false, + config: history.BackupConfig{ + IncludeRelations: []string{"test.table2", "test.table3"}, + }, + }, + { + name: "table filter with no object filter", + tableFilter: "test.table1", + objectFilter: "", + want: false, + }, + { + name: "table filter with different object filter", + tableFilter: "test.table1", + objectFilter: "include-schema", + want: false, + config: history.BackupConfig{ + IncludeSchemas: []string{"test"}, + }, + }, + { + name: "exclude table filter matches", + tableFilter: "test.table1", + objectFilter: "exclude-table", + excludeFilter: true, + want: true, + config: history.BackupConfig{ + ExcludeRelations: []string{"test.table1", "test.table2"}, + }, + }, + { + name: "exclude table filter with no object filter", + tableFilter: "test.table1", + excludeFilter: true, + want: false, + }, + { + name: "schema filter matches included schema", + schemaFilter: "test", + objectFilter: "include-schema", + want: true, + config: history.BackupConfig{ + IncludeSchemas: []string{"test"}, + }, + }, + { + name: "schema filter with no object filter", + schemaFilter: "test", + want: false, + }, + { + name: "exclude schema filter matches", + schemaFilter: "test", + objectFilter: "exclude-schema", + excludeFilter: true, + want: true, + config: history.BackupConfig{ + ExcludeSchemas: []string{"test"}, + }, + }, + { + name: "exclude schema filter with no object filter", + schemaFilter: "test", + excludeFilter: true, + want: false, + }, + } + for _, tt := range tests { + cfg := tt.config + got := CheckObjectFilteringExists(&cfg, tt.tableFilter, tt.schemaFilter, tt.objectFilter, tt.excludeFilter) + Expect(got).To(Equal(tt.want), tt.name) + } + }) + }) +}) diff --git a/gpbackman/gpbckpconfig/struct.go 
b/gpbackman/gpbckpconfig/struct.go new file mode 100644 index 00000000..1ed1c09b --- /dev/null +++ b/gpbackman/gpbckpconfig/struct.go @@ -0,0 +1,22 @@ +package gpbckpconfig + +const ( + Layout = "20060102150405" + DateFormat = "Mon Jan 02 2006 15:04:05" + // Backup types. + BackupTypeFull = "full" + BackupTypeIncremental = "incremental" + BackupTypeDataOnly = "data-only" + BackupTypeMetadataOnly = "metadata-only" + // Object filtering types. + objectFilteringIncludeSchema = "include-schema" + objectFilteringExcludeSchema = "exclude-schema" + objectFilteringIncludeTable = "include-table" + objectFilteringExcludeTable = "exclude-table" + // Date deleted types. + DateDeletedInProgress = "In progress" + DateDeletedPluginFailed = "Plugin Backup Delete Failed" + DateDeletedLocalFailed = "Local Delete Failed" + // BackupS3Plugin S3 plugin names. + BackupS3Plugin = "gpbackup_s3_plugin" +) diff --git a/gpbackman/gpbckpconfig/utils.go b/gpbackman/gpbckpconfig/utils.go new file mode 100644 index 00000000..ef5e07bf --- /dev/null +++ b/gpbackman/gpbckpconfig/utils.go @@ -0,0 +1,156 @@ +package gpbckpconfig + +import ( + "errors" + "fmt" + "os" + "path" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/apache/cloudberry-backup/gpbackman/textmsg" + "github.com/apache/cloudberry-go-libs/operating" +) + +// CheckTimestamp Returns error if timestamp is not valid. +func CheckTimestamp(timestamp string) error { + timestampFormat := regexp.MustCompile(`^(\d{14})$`) + if !timestampFormat.MatchString(timestamp) { + return textmsg.ErrorValidationTimestamp() + } + return nil +} + +func GetTimestampOlderThan(value uint) string { + return time.Now().AddDate(0, 0, -int(value)).Format(Layout) +} + +// CheckFullPath Returns error if path is not an absolute path or +// file does not exist. 
+func CheckFullPath(checkPath string, checkFileExists bool) error { + if !filepath.IsAbs(checkPath) { + return textmsg.ErrorValidationFullPath() + } + // In most cases this check should be mandatory. + // But there are commands, that allows the history db file to be missing. + if checkFileExists { + if _, err := os.Stat(checkPath); errors.Is(err, os.ErrNotExist) { + return textmsg.ErrorFileNotExist() + } + } + return nil +} + +// CheckTableFQN Returns error if table FQN is not in the format . +func CheckTableFQN(table string) error { + format := regexp.MustCompile(`^.+\..+$`) + if !format.MatchString(table) { + return textmsg.ErrorValidationTableFQN() + } + return nil +} + +// IsBackupActive Returns true if backup is active (not deleted). +func IsBackupActive(dateDeleted string) bool { + return (dateDeleted == "" || + dateDeleted == DateDeletedPluginFailed || + dateDeleted == DateDeletedLocalFailed) +} + +// IsPositiveValue Returns true if the value is positive. +func IsPositiveValue(value int) bool { + return value > 0 +} + +// backupPluginCustomReportPath Returns custom report path: +// +// /gpbackup__report +func backupPluginCustomReportPath(timestamp, folderValue string) string { + return filepath.Join("/", folderValue, ReportFileName(timestamp)) +} + +// backupS3PluginReportPath Returns path to report file name for gpbackup_s3_plugin plugin. +// Basic path for s3 plugin format: +// +// /backups///gpbackup__report +// +// See GetS3Path() func in https://github.com/greenplum-db/gpbackup-s3-plugin. +// If folder option is not specified or it is empty, the error will be returned. +func backupS3PluginReportPath(timestamp string, pluginOptions map[string]string) (string, error) { + pathOption := "folder" + // Timestamp validation is done on flags validation. + // We assume, that is the correct value coming from. 
+ reportPathBasic := "backups/" + timestamp[0:8] + "/" + timestamp + folderValue, exists := pluginOptions[pathOption] + if !exists || folderValue == "" { + return "", textmsg.ErrorValidationPluginOption(pathOption, BackupS3Plugin) + } + // It's necessary to return full path to report file with leading '/'. + // But in config file folder value could be with leading '/' or without. + // So we need to remove leading '/' and add it back. + folderValue = strings.TrimPrefix(folderValue, "/") + folderValue = strings.TrimSuffix(folderValue, "/") + return filepath.Join("/", folderValue, reportPathBasic, ReportFileName(timestamp)), nil +} + +// ReportFileName Returns report file name for specific timestamp. +// Report file name format: gpbackup__report. +func ReportFileName(timestamp string) string { + return "gpbackup_" + timestamp + "_report" +} + +// CheckMasterBackupDir checks the backup directory for the master backup. +// It first tries to find the backup directory in the single-backup-dir format. +// If the single-backup-dir format is not used, it returns an error. +// If the single-backup-dir format is used, it returns the backup directory and sets the prefix to an empty string. +// If the single-backup-dir format is not found, it tries to find the backup directory with segment prefix format. +// If the backup directory with segment prefix format is not found, it returns an error. +// If multiple backup directories with segment prefix format are found, it returns an error. +// Otherwise, it returns the backup directory with segment prefix format, the segment prefix, and useSingleBackupDir flag to false. +func CheckMasterBackupDir(backupDir string) (string, string, bool, error) { + // Try to find the backup directory in the single-backup-dir format. + _, err := operating.System.Stat(fmt.Sprintf("%s/backups", backupDir)) + // The single-backup-dir directory format is not used. 
+ if err != nil && !os.IsNotExist(err) { + return "", "", false, textmsg.ErrorFindBackupDirIn(backupDir, err) + } + if err == nil { + // The single-backup-dir directory format is used, there's no prefix to parse. + return backupDir, "", true, nil + } + // Try to find the backup directory with segment prefix format. + backupDirForMaster, err := operating.System.Glob(fmt.Sprintf("%s/*-1/backups", backupDir)) + if err != nil { + return "", "", false, textmsg.ErrorFindBackupDirIn(backupDir, err) + } + if len(backupDirForMaster) == 0 { + return "", "", false, textmsg.ErrorNotFoundBackupDirIn(backupDir) + } + if len(backupDirForMaster) != 1 { + return "", "", false, textmsg.ErrorSeveralFoundBackupDirIn(backupDir) + } + segPrefix := GetSegPrefix(backupDirForMaster[0]) + returnDir := filepath.Join(backupDir, fmt.Sprintf("%s-1", segPrefix)) + return returnDir, segPrefix, false, nil +} + +// GetSegPrefix Returns segment prefix from the master backup directory. +func GetSegPrefix(backupDir string) string { + indexOfBackupsSubstr := strings.LastIndex(backupDir, "-1/backups") + _, segPrefix := path.Split(backupDir[:indexOfBackupsSubstr]) + return segPrefix +} + +// ReportFilePath Returns path to report file. +func ReportFilePath(backupDir, timestamp string) string { + return filepath.Join(BackupDirPath(backupDir, timestamp), ReportFileName(timestamp)) +} + +// BackupDirPath Returns path to full backup directory. +func BackupDirPath(backupDir, timestamp string) string { + return filepath.Join(backupDir, "backups", timestamp[0:8], timestamp) +} + + diff --git a/gpbackman/gpbckpconfig/utils_db.go b/gpbackman/gpbckpconfig/utils_db.go new file mode 100644 index 00000000..102e7290 --- /dev/null +++ b/gpbackman/gpbckpconfig/utils_db.go @@ -0,0 +1,191 @@ +package gpbckpconfig + +import ( + "database/sql" + "fmt" + "strings" + + "github.com/apache/cloudberry-backup/history" +) + +// OpenHistoryDB opens the history backup database. 
+func OpenHistoryDB(historyDBPath string) (*sql.DB, error) { + db, err := sql.Open("sqlite3", historyDBPath) + if err != nil { + return nil, err + } + return db, nil +} + +// GetBackupDataDB reads backup data from history database. +func GetBackupDataDB(backupName string, hDB *sql.DB) (*history.BackupConfig, error) { + return history.GetBackupConfig(backupName, hDB) +} + +// GetBackupNamesDB returns a list of backup names. +func GetBackupNamesDB(showD, showF bool, historyDB *sql.DB) ([]string, error) { + return execQueryFunc(getBackupNameQuery(showD, showF), historyDB) +} + +func GetBackupDependencies(backupName string, historyDB *sql.DB) ([]string, error) { + return execQueryFunc(getBackupDependenciesQuery(backupName), historyDB) +} + +func GetBackupNamesBeforeTimestamp(timestamp string, historyDB *sql.DB) ([]string, error) { + return execQueryFunc(getBackupNameBeforeTimestampQuery(timestamp), historyDB) +} + +func GetBackupNamesAfterTimestamp(timestamp string, historyDB *sql.DB) ([]string, error) { + return execQueryFunc(getBackupNameAfterTimestampQuery(timestamp), historyDB) +} + +func GetBackupNamesForCleanBeforeTimestamp(timestamp string, historyDB *sql.DB) ([]string, error) { + return execQueryFunc(getBackupNameForCleanBeforeTimestampQuery(timestamp), historyDB) +} + +func getBackupNameQuery(showD, showF bool) string { + orderBy := "ORDER BY timestamp DESC;" + getBackupsQuery := "SELECT timestamp FROM backups" + switch { + case showD && showF: + getBackupsQuery = fmt.Sprintf("%s %s", getBackupsQuery, orderBy) + case showD && !showF: + getBackupsQuery = fmt.Sprintf("%s WHERE status != '%s' %s", getBackupsQuery, history.BackupStatusFailed, orderBy) + case !showD && showF: + getBackupsQuery = fmt.Sprintf("%s WHERE date_deleted IN ('', '%s', '%s', '%s') %s", getBackupsQuery, DateDeletedInProgress, DateDeletedPluginFailed, DateDeletedLocalFailed, orderBy) + default: + getBackupsQuery = fmt.Sprintf("%s WHERE status != '%s' AND date_deleted IN ('', '%s', '%s', '%s') 
%s", getBackupsQuery, history.BackupStatusFailed, DateDeletedInProgress, DateDeletedPluginFailed, DateDeletedLocalFailed, orderBy) + } + return getBackupsQuery +} + +func getBackupDependenciesQuery(backupName string) string { + return fmt.Sprintf(` +SELECT timestamp +FROM restore_plans +WHERE timestamp != '%s' + AND restore_plan_timestamp = '%s' +ORDER BY timestamp DESC; +`, backupName, backupName) +} + +func getBackupNameBeforeTimestampQuery(timestamp string) string { + return fmt.Sprintf(` +SELECT timestamp +FROM backups +WHERE timestamp < '%s' + AND status != '%s' + AND date_deleted IN ('', '%s', '%s') +ORDER BY timestamp DESC; +`, timestamp, history.BackupStatusInProgress, DateDeletedPluginFailed, DateDeletedLocalFailed) +} + +func getBackupNameAfterTimestampQuery(timestamp string) string { + return fmt.Sprintf(` +SELECT timestamp +FROM backups +WHERE timestamp > '%s' + AND status != '%s' + AND date_deleted IN ('', '%s', '%s') +ORDER BY timestamp DESC; +`, timestamp, history.BackupStatusInProgress, DateDeletedPluginFailed, DateDeletedLocalFailed) +} + +func getBackupNameForCleanBeforeTimestampQuery(timestamp string) string { + return fmt.Sprintf(` +SELECT timestamp +FROM backups +WHERE timestamp < '%s' + AND date_deleted NOT IN ('', '%s', '%s', '%s') +ORDER BY timestamp DESC; +`, timestamp, DateDeletedPluginFailed, DateDeletedLocalFailed, DateDeletedInProgress) +} + +// UpdateDeleteStatus updates the date_deleted column in the history database. +func UpdateDeleteStatus(backupName, dateDeleted string, historyDB *sql.DB) error { + return execStatementFunc(updateDeleteStatusQuery(backupName, dateDeleted), historyDB) +} + +// CleanBackupsDB cleans the backup history database. 
// CleanBackupsDB removes the history rows for the given backup timestamps
// from every timestamp-keyed history table, deleting in batches of batchSize
// so each SQL statement stays bounded.
func CleanBackupsDB(list []string, batchSize int, historyDB *sql.DB) error {
	if len(list) == 0 {
		return nil
	}
	if batchSize < 1 {
		// A non-positive batch size would make the loop below spin forever;
		// fall back to deleting everything in a single batch.
		batchSize = len(list)
	}
	// All history tables keyed by backup timestamp; "backups" is cleaned
	// first, as in the original implementation.
	tables := []string{
		"backups",
		"restore_plans",
		"restore_plan_tables",
		"exclude_relations",
		"exclude_schemas",
		"include_relations",
		"include_schemas",
	}
	for i := 0; i < len(list); i += batchSize {
		end := i + batchSize
		if end > len(list) {
			end = len(list)
		}
		// Timestamps are validated 14-digit strings, so quoting by
		// concatenation is acceptable here; parameterized queries would be
		// preferable if this ever handles external input.
		idStr := "'" + strings.Join(list[i:end], "','") + "'"
		for _, table := range tables {
			if err := execStatementFunc(deleteBackupsFormTableQuery(table, idStr), historyDB); err != nil {
				return err
			}
		}
	}
	return nil
}

// deleteBackupsFormTableQuery builds a DELETE statement removing the given
// quoted, comma-separated timestamps from table.
func deleteBackupsFormTableQuery(table, value string) string {
	return fmt.Sprintf(`DELETE FROM %s WHERE timestamp IN (%s);`, table, value)
}

// updateDeleteStatusQuery builds an UPDATE statement setting date_deleted to
// status for the backup identified by timestamp.
func updateDeleteStatusQuery(timestamp, status string) string {
	return fmt.Sprintf(`UPDATE backups SET date_deleted = '%s' WHERE timestamp = '%s';`, status, timestamp)
}

// execQueryFunc runs a SELECT returning a single text column and collects
// the values in row order. Returns a nil slice when there are no rows.
func execQueryFunc(query string, historyDB *sql.DB) ([]string, error) {
	sqlRow, err := historyDB.Query(query)
	if err != nil {
		return nil, err
	}
	defer sqlRow.Close()
	var resultList []string
	for sqlRow.Next() {
		var b string
		if err := sqlRow.Scan(&b); err != nil {
			return nil, err
		}
		resultList = append(resultList, b)
	}
	if err := sqlRow.Err(); err != nil {
		return nil, err
	}
	return resultList, nil
}

// execStatementFunc executes a single statement inside a transaction,
// rolling back on failure.
func execStatementFunc(query string, historyDB *sql.DB) error {
	tx, err := historyDB.Begin()
	if err != nil {
		return err
	}
	if _, err = tx.Exec(query); err != nil {
		// Best effort: the original statement error is more informative than
		// any rollback failure, so the rollback result is ignored.
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}
}) + + Describe("getBackupNameBeforeTimestampQuery", func() { + It("returns correct query", func() { + want := fmt.Sprintf(` +SELECT timestamp +FROM backups +WHERE timestamp < '20240101120000' + AND status != '%s' + AND date_deleted IN ('', 'Plugin Backup Delete Failed', 'Local Delete Failed') +ORDER BY timestamp DESC; +`, history.BackupStatusInProgress) + Expect(getBackupNameBeforeTimestampQuery("20240101120000")).To(Equal(want)) + }) + }) + + Describe("getBackupNameAfterTimestampQuery", func() { + It("returns correct query", func() { + want := fmt.Sprintf(` +SELECT timestamp +FROM backups +WHERE timestamp > '20240101120000' + AND status != '%s' + AND date_deleted IN ('', 'Plugin Backup Delete Failed', 'Local Delete Failed') +ORDER BY timestamp DESC; +`, history.BackupStatusInProgress) + Expect(getBackupNameAfterTimestampQuery("20240101120000")).To(Equal(want)) + }) + }) + + Describe("getBackupNameForCleanBeforeTimestampQuery", func() { + It("returns correct query", func() { + want := ` +SELECT timestamp +FROM backups +WHERE timestamp < '20240101120000' + AND date_deleted NOT IN ('', 'Plugin Backup Delete Failed', 'Local Delete Failed', 'In progress') +ORDER BY timestamp DESC; +` + Expect(getBackupNameForCleanBeforeTimestampQuery("20240101120000")).To(Equal(want)) + }) + }) + + Describe("deleteBackupsFormTableQuery", func() { + It("returns correct query", func() { + got := deleteBackupsFormTableQuery("TestBackup", "'20220401102430', '20220401102430'") + Expect(got).To(Equal("DELETE FROM TestBackup WHERE timestamp IN ('20220401102430', '20220401102430');")) + }) + }) + + Describe("updateDeleteStatusQuery", func() { + It("returns correct query", func() { + got := updateDeleteStatusQuery("TestBackup", "20220401102430") + Expect(got).To(Equal("UPDATE backups SET date_deleted = '20220401102430' WHERE timestamp = 'TestBackup';")) + }) + }) +}) diff --git a/gpbackman/gpbckpconfig/utils_test.go b/gpbackman/gpbckpconfig/utils_test.go new file mode 100644 index 
00000000..825f2a74 --- /dev/null +++ b/gpbackman/gpbckpconfig/utils_test.go @@ -0,0 +1,236 @@ +package gpbckpconfig + +import ( + "os" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("utils tests", func() { + Describe("CheckTimestamp", func() { + It("accepts valid timestamp", func() { + Expect(CheckTimestamp("20230822120000")).To(Succeed()) + }) + + It("rejects invalid timestamp", func() { + Expect(CheckTimestamp("invalid")).To(HaveOccurred()) + }) + + It("rejects wrong length timestamp", func() { + Expect(CheckTimestamp("2023082212000")).To(HaveOccurred()) + }) + }) + + Describe("CheckFullPath", func() { + It("accepts existing file with full path", func() { + tempFile, err := os.CreateTemp("", "testfile") + Expect(err).ToNot(HaveOccurred()) + defer os.Remove(tempFile.Name()) + Expect(CheckFullPath(tempFile.Name(), true)).To(Succeed()) + }) + + It("rejects non-existing file with full path", func() { + Expect(CheckFullPath("/some/path/test.txt", true)).To(HaveOccurred()) + }) + + It("rejects empty path", func() { + Expect(CheckFullPath("", false)).To(HaveOccurred()) + }) + + It("rejects relative path", func() { + Expect(CheckFullPath("test.txt", false)).To(HaveOccurred()) + }) + }) + + Describe("IsBackupActive", func() { + It("returns correct result for various date deleted values", func() { + tests := []struct { + name string + value string + want bool + }{ + {"empty delete date", "", true}, + {"plugin error", DateDeletedPluginFailed, true}, + {"local error", DateDeletedLocalFailed, true}, + {"deletion in progress", DateDeletedInProgress, false}, + {"deleted", "20220401102430", false}, + } + for _, tt := range tests { + Expect(IsBackupActive(tt.value)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("IsPositiveValue", func() { + It("returns true for positive value", func() { + Expect(IsPositiveValue(10)).To(BeTrue()) + }) + + It("returns false for zero", func() { + 
Expect(IsPositiveValue(0)).To(BeFalse()) + }) + + It("returns false for negative value", func() { + Expect(IsPositiveValue(-5)).To(BeFalse()) + }) + }) + + Describe("backupS3PluginReportPath", func() { + It("returns correct path for valid options", func() { + got, err := backupS3PluginReportPath("20230112131415", map[string]string{"folder": "/path/to/folder"}) + Expect(err).ToNot(HaveOccurred()) + Expect(got).To(Equal("/path/to/folder/backups/20230112/20230112131415/gpbackup_20230112131415_report")) + }) + + It("returns error for missing options", func() { + _, err := backupS3PluginReportPath("20230112131415", nil) + Expect(err).To(HaveOccurred()) + }) + + It("returns error for wrong options key", func() { + _, err := backupS3PluginReportPath("20230112131415", map[string]string{"wrong_key": "/path/to/folder"}) + Expect(err).To(HaveOccurred()) + }) + }) + + Describe("ReportFileName", func() { + It("returns correct report file name", func() { + Expect(ReportFileName("202301011234")).To(Equal("gpbackup_202301011234_report")) + }) + }) + + Describe("backupPluginCustomReportPath", func() { + It("returns correct path", func() { + tests := []struct { + name string + timestamp string + folder string + want string + }{ + {"basic", "20230101123456", "/backup/folder", "/backup/folder/gpbackup_20230101123456_report"}, + {"trailing slashes", "20230101123456", "/backup//folder//", "/backup/folder/gpbackup_20230101123456_report"}, + {"folder with spaces", "20230101123456", "/backup/folder with spaces", "/backup/folder with spaces/gpbackup_20230101123456_report"}, + {"no leading slash", "20230101123456", "backup/folder with spaces", "/backup/folder with spaces/gpbackup_20230101123456_report"}, + } + for _, tt := range tests { + Expect(backupPluginCustomReportPath(tt.timestamp, tt.folder)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("GetTimestampOlderThan", func() { + It("returns timestamp within expected range", func() { + input := uint(1) + got := 
GetTimestampOlderThan(input) + parsedTime, err := time.ParseInLocation(Layout, got, time.Now().Location()) + Expect(err).ToNot(HaveOccurred()) + now := time.Now() + expected := now.AddDate(0, 0, -int(input)) + Expect(parsedTime.Before(now)).To(BeTrue()) + Expect(parsedTime.Sub(expected).Seconds()).To(BeNumerically("<=", 1)) + }) + }) + + Describe("CheckTableFQN", func() { + It("accepts valid table name", func() { + Expect(CheckTableFQN("public.table_1")).To(Succeed()) + }) + + It("rejects invalid table name", func() { + Expect(CheckTableFQN("invalid_table")).To(HaveOccurred()) + }) + }) + + Describe("ReportFilePath", func() { + It("returns correct report file path", func() { + got := ReportFilePath("/path/to/backup", "20230101123456") + Expect(got).To(Equal("/path/to/backup/backups/20230101/20230101123456/gpbackup_20230101123456_report")) + }) + }) + + Describe("GetSegPrefix", func() { + It("returns correct segment prefix", func() { + Expect(GetSegPrefix("/path/to/backup/segment-1/backups")).To(Equal("segment")) + }) + }) + + Describe("CheckMasterBackupDir", func() { + It("returns correct values for various backup dirs", func() { + tempDir := os.TempDir() + tests := []struct { + name string + testDir string + backupDir string + wantDir string + wantPrefix string + wantSingleBackupDir bool + wantErr bool + }{ + { + name: "valid single backup dir", + testDir: filepath.Join(tempDir, "noSegPrefix", "backups"), + backupDir: filepath.Join(tempDir, "noSegPrefix"), + wantDir: filepath.Join(tempDir, "noSegPrefix"), + wantPrefix: "", + wantSingleBackupDir: true, + }, + { + name: "valid backup dir with segment prefix", + testDir: filepath.Join(tempDir, "segPrefix", "segment-1", "backups"), + backupDir: filepath.Join(tempDir, "segPrefix"), + wantDir: filepath.Join(tempDir, "segPrefix", "segment-1"), + wantPrefix: "segment", + wantSingleBackupDir: false, + }, + { + name: "invalid backup dir", + testDir: filepath.Join(tempDir, "invalid"), + backupDir: filepath.Join(tempDir, 
"invalid"), + wantErr: true, + }, + { + name: "non-existent path", + testDir: tempDir, + backupDir: "some/path", + wantErr: true, + }, + } + for _, tt := range tests { + err := os.MkdirAll(tt.testDir, 0o755) + Expect(err).ToNot(HaveOccurred()) + defer os.Remove(tt.testDir) + gotDir, gotPrefix, gotIsSingleBackupDir, err := CheckMasterBackupDir(tt.backupDir) + if tt.wantErr { + Expect(err).To(HaveOccurred(), tt.name) + } else { + Expect(err).ToNot(HaveOccurred(), tt.name) + Expect(gotDir).To(Equal(tt.wantDir), tt.name) + Expect(gotPrefix).To(Equal(tt.wantPrefix), tt.name) + Expect(gotIsSingleBackupDir).To(Equal(tt.wantSingleBackupDir), tt.name) + } + } + }) + }) + + Describe("BackupDirPath", func() { + It("returns correct backup dir path", func() { + tests := []struct { + name string + backupDir string + timestamp string + want string + }{ + {"basic path", "/data/backup", "20230101123456", "/data/backup/backups/20230101/20230101123456"}, + {"path with trailing slash", "/data/backup/", "20230101123456", "/data/backup/backups/20230101/20230101123456"}, + } + for _, tt := range tests { + Expect(BackupDirPath(tt.backupDir, tt.timestamp)).To(Equal(tt.want), tt.name) + } + }) + }) + + +}) diff --git a/gpbackman/textmsg/error.go b/gpbackman/textmsg/error.go new file mode 100644 index 00000000..c9d1e27e --- /dev/null +++ b/gpbackman/textmsg/error.go @@ -0,0 +1,253 @@ +package textmsg + +import ( + "errors" + "fmt" + "strings" +) + +// Collection of possible error texts. + +// Errors that occur when working with a history db. + +func ErrorTextUnableActionHistoryDB(value string, err error) string { + return fmt.Sprintf("Unable to %s history db. Error: %v", value, err) +} + +func ErrorTextUnableReadHistoryDB(err error) string { + return fmt.Sprintf("Unable to read data from history db. Error: %v", err) +} + +// Errors that occur when working with a backup data. 
+ +func ErrorTextUnableGetBackupInfo(backupName string, err error) string { + return fmt.Sprintf("Unable to get info for backup %s. Error: %v", backupName, err) +} + +func ErrorTextUnableGetBackupValue(value, backupName string, err error) string { + return fmt.Sprintf("Unable to get backup %s for backup %s. Error: %v", value, backupName, err) +} + +func ErrorTextUnableSetBackupStatus(value, backupName string, err error) string { + return fmt.Sprintf("Unable to set %s status for backup %s. Error: %v", value, backupName, err) +} + +func ErrorTextUnableDeleteBackup(backupName string, err error) string { + return fmt.Sprintf("Unable to delete backup %s. Error: %v", backupName, err) +} + +func ErrorTextUnableWorkBackup(backupName string, err error) string { + return fmt.Sprintf("Unable to work with backup %s. Error: %v", backupName, err) +} + +func ErrorTextUnableDeleteBackupCascade(backupName string, err error) string { + return fmt.Sprintf("Unable to delete dependent backups for backup %s. Error: %v", backupName, err) +} + +func ErrorTextUnableDeleteBackupUseCascade(backupName string, err error) string { + return fmt.Sprintf("Backup %s has dependent backups. Use --cascade option. Error: %v", backupName, err) +} + +func ErrorTextBackupDeleteInProgress(backupName string, err error) string { + return fmt.Sprintf("Backup %s deletion in progress. Error: %v", backupName, err) +} + +func ErrorTextUnableGetBackupReport(backupName string, err error) string { + return fmt.Sprintf("Unable to get report for the backup %s. Error: %v", backupName, err) +} + +func ErrorTextUnableGetBackupPath(value, backupName string, err error) string { + return fmt.Sprintf("Unable to get path to %s for the backup %s. Error: %v", value, backupName, err) +} + +// Errors that occur when working with a backup plugin. + +func ErrorTextUnableReadPluginConfigFile(err error) string { + return fmt.Sprintf("Unable to read plugin config file. 
Error: %v", err) +} + +// Error that occur when working with a local backup. + +func ErrorTextCommandExecutionFailed(err error, values ...string) string { + return fmt.Sprintf("Command failed: %s. Error: %v", strings.Join(values, " "), err) +} + +// Errors that occur during flags validation. + +func ErrorTextUnableValidateFlag(value, flag string, err error) string { + return fmt.Sprintf( + "Unable to validate value %s for flag %s. Error: %v", value, flag, err) +} + +func ErrorTextUnableCompatibleFlags(err error, values ...string) string { + return fmt.Sprintf( + "Unable to use the following flags together: %s. Error: %v", + strings.Join(values, ", "), err) +} + +func ErrorTextUnableValidateValue(err error, values ...string) string { + return fmt.Sprintf("Unable to validate provided arguments. Try to use one of flags: %s. Error: %v", + strings.Join(values, ", "), err) +} + +// Errors that occur when working with a local cluster. + +func ErrorTextUnableConnectLocalCluster(err error) string { + return fmt.Sprintf("Unable to connect to the cluster locally. Error: %v", err) +} + +func ErrorTextUnableGetBackupDirLocalClusterConn(err error) string { + return fmt.Sprintf("Unable to get backup directory from a local connection to the cluster. Error: %v", err) +} + +// Errors that occur when working with backup reports. + +func ErrorTextUnableGetReport(err error) string { + return fmt.Sprintf("Unable to get report. Error: %v", err) +} + +func ErrorTextUnableCheckTimestamp(err error) string { + return fmt.Sprintf("Unable to check timestamp. Error: %v", err) +} + +func ErrorTextUnableGetSegPrefix(err error) string { + return fmt.Sprintf("Unable to get segment prefix. Error: %v", err) +} + +func ErrorTextUnableCheckPath(err error) string { + return fmt.Sprintf("Unable to check path. Error: %v", err) +} + +// Errors that occur when working with local backup directories. 
+ +func ErrorTextUnableDeleteLocalBackup(err error) string { + return fmt.Sprintf("Unable to delete local backup. Error: %v", err) +} + +func ErrorTextUnableCleanDB(err error) string { + return fmt.Sprintf("Unable to clean db. Error: %v", err) +} + +func ErrorTextUnableDeletePluginBackup(backupName string, err error) string { + return fmt.Sprintf("Unable to delete plugin backup %s. Error: %v", backupName, err) +} + +func ErrorTextUnableDeletePluginReport(backupName string, err error) string { + return fmt.Sprintf("Unable to delete plugin report %s. Error: %v", backupName, err) +} + +func ErrorTextUnableUpdateDeleteStatus(backupName string, err error) string { + return fmt.Sprintf("Unable to update delete status for %s. Error: %v", backupName, err) +} + +func ErrorTextUnableCheckBackupDir(backupName string, err error) string { + return fmt.Sprintf("Unable to check backup dir %s. Error: %v", backupName, err) +} + +func ErrorTextUnableDeleteFile(value1, value2 string, err error) string { + return fmt.Sprintf("Unable to delete %s %s. Error: %v", value1, value2, err) +} + +func InfoTextSetBackupDeleteStatus(backupName, status string) string { + return fmt.Sprintf("Set backup %s delete status to %s", backupName, status) +} + +// Error that is returned when flags validation not passed. + +func ErrorInvalidValueError() error { + return errors.New("invalid flag value") +} + +func ErrorIncompatibleFlagsError() error { + return errors.New("incompatible flags") +} + +func ErrorNotIndependentFlagsError() error { + return errors.New("not an independent flag") +} + +// Error that is returned when backup deletion fails. 
+ +func ErrorBackupDeleteInProgressError() error { + return errors.New("backup deletion in progress") +} + +func ErrorBackupDeleteCascadeOptionError() error { + return errors.New("use cascade option") +} + +func ErrorBackupLocalStorageError() error { + return errors.New("is a local backup") +} + +func ErrorBackupNotLocalStorageError() error { + return errors.New("is not a local backup") +} + +// Error that is returned when some validation fails. + +func ErrorValidationFullPath() error { + return errors.New("not an absolute path") +} + +func ErrorFileNotExist() error { + return errors.New("file not exist") +} + +func ErrorValidationTableFQN() error { + return errors.New("not a fully qualified table name") +} + +func ErrorValidationTimestamp() error { + return errors.New("not a timestamp") +} + +func ErrorValidationValue() error { + return errors.New("value not set") +} + +func ErrorEmptyDatabase() error { + return errors.New("database name cannot be empty") +} + +// Error that is returned when some plugin options validation fails. + +func ErrorValidationPluginOption(value, pluginName string) error { + return fmt.Errorf("invalid plugin %s option value for plugin %s", value, pluginName) +} + +// Errors that are returned when some backup directory validation fails. + +func ErrorFindBackupDirIn(value string, err error) error { + return fmt.Errorf("can not find backup directory in %s, error: %v", value, err.Error()) +} + +func ErrorNotFoundBackupDirIn(value string) error { + return fmt.Errorf("no backup directory found in %s", value) +} + +func ErrorSeveralFoundBackupDirIn(value string) error { + return fmt.Errorf("several backup directory found in %s", value) +} + +// Error that is returned when backup not found. 
+ +func ErrorBackupNotFoundError(backupName string) error { + return fmt.Errorf("backup %s not found", backupName) +} + +func ErrorInvalidInputValueError(value string) error { + return fmt.Errorf("invalid input value: %s", value) +} + +// Error that is returned when backup has specific delete status. + +func ErrorSetBackupDeleteStatus(backupName, status string) error { + return fmt.Errorf("backup %s has delete status %s", backupName, status) +} + +// Error that is returned when backup dir is not specified. + +func ErrorBackupDirNotSpecifiedError() error { + return errors.New("backup dir is not specified") +} diff --git a/gpbackman/textmsg/error_test.go b/gpbackman/textmsg/error_test.go new file mode 100644 index 00000000..b94ef9da --- /dev/null +++ b/gpbackman/textmsg/error_test.go @@ -0,0 +1,145 @@ +package textmsg + +import ( + "errors" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("error tests", func() { + Describe("error text functions with error only", func() { + It("returns correct error text", func() { + testError := errors.New("test error") + tests := []struct { + name string + function func(error) string + want string + }{ + {"ErrorTextUnableReadHistoryDB", ErrorTextUnableReadHistoryDB, "Unable to read data from history db. Error: test error"}, + {"ErrorTextUnableGetReport", ErrorTextUnableGetReport, "Unable to get report. Error: test error"}, + {"ErrorTextUnableCheckTimestamp", ErrorTextUnableCheckTimestamp, "Unable to check timestamp. Error: test error"}, + {"ErrorTextUnableGetSegPrefix", ErrorTextUnableGetSegPrefix, "Unable to get segment prefix. Error: test error"}, + {"ErrorTextUnableCheckPath", ErrorTextUnableCheckPath, "Unable to check path. Error: test error"}, + {"ErrorTextUnableDeleteLocalBackup", ErrorTextUnableDeleteLocalBackup, "Unable to delete local backup. Error: test error"}, + {"ErrorTextUnableCleanDB", ErrorTextUnableCleanDB, "Unable to clean db. 
Error: test error"}, + } + for _, tt := range tests { + Expect(tt.function(testError)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("error text functions with error and arg", func() { + It("returns correct error text", func() { + testError := errors.New("test error") + tests := []struct { + name string + value string + function func(string, error) string + want string + }{ + {"ErrorTextUnableGetBackupInfo", "TestValue", ErrorTextUnableGetBackupInfo, "Unable to get info for backup TestValue. Error: test error"}, + {"ErrorTextUnableDeletePluginBackup", "TestValue", ErrorTextUnableDeletePluginBackup, "Unable to delete plugin backup TestValue. Error: test error"}, + {"ErrorTextUnableDeletePluginReport", "TestValue", ErrorTextUnableDeletePluginReport, "Unable to delete plugin report TestValue. Error: test error"}, + {"ErrorTextUnableUpdateDeleteStatus", "TestValue", ErrorTextUnableUpdateDeleteStatus, "Unable to update delete status for TestValue. Error: test error"}, + {"ErrorTextUnableCheckBackupDir", "TestValue", ErrorTextUnableCheckBackupDir, "Unable to check backup dir TestValue. Error: test error"}, + {"ErrorTextUnableActionHistoryDB", "TestAction", ErrorTextUnableActionHistoryDB, "Unable to TestAction history db. Error: test error"}, + } + for _, tt := range tests { + Expect(tt.function(tt.value, testError)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("error text functions with error and two args", func() { + It("returns correct error text", func() { + testError := errors.New("test error") + tests := []struct { + name string + value1 string + value2 string + function func(string, string, error) string + want string + }{ + {"ErrorTextUnableDeleteFile", "TestValue1", "TestValue2", ErrorTextUnableDeleteFile, "Unable to delete TestValue1 TestValue2. 
Error: test error"}, + } + for _, tt := range tests { + Expect(tt.function(tt.value1, tt.value2, testError)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("error text functions with error and multiple args", func() { + It("returns correct error text", func() { + testError := errors.New("test error") + tests := []struct { + name string + values []string + function func(error, ...string) string + want string + }{ + {"ErrorTextUnableCompatibleFlags", []string{"flag1", "flag2"}, ErrorTextUnableCompatibleFlags, "Unable to use the following flags together: flag1, flag2. Error: test error"}, + } + for _, tt := range tests { + Expect(tt.function(testError, tt.values...)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("error functions with one arg", func() { + It("returns correct error", func() { + tests := []struct { + name string + value string + function func(string) error + want string + }{ + {"ErrorBackupNotFoundError", "TestBackup", ErrorBackupNotFoundError, "backup TestBackup not found"}, + {"ErrorInvalidInputValueError", "TestValue", ErrorInvalidInputValueError, "invalid input value: TestValue"}, + } + for _, tt := range tests { + err := tt.function(tt.value) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("error functions with two args", func() { + It("returns correct error", func() { + tests := []struct { + name string + value1 string + value2 string + function func(string, string) error + want string + }{ + {"ErrorSetBackupDeleteStatus", "TestBackup", "TestStatus", ErrorSetBackupDeleteStatus, "backup TestBackup has delete status TestStatus"}, + } + for _, tt := range tests { + err := tt.function(tt.value1, tt.value2) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("error functions returning error", func() { + It("returns correct error", func() { + tests := []struct { + name string + function func() error + want string + }{ + 
{"ErrorBackupDirNotSpecifiedError", ErrorBackupDirNotSpecifiedError, "backup dir is not specified"}, + {"ErrorBackupDeleteInProgressError", ErrorBackupDeleteInProgressError, "backup deletion in progress"}, + } + for _, tt := range tests { + err := tt.function() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal(tt.want), tt.name) + } + }) + }) +}) diff --git a/gpbackman/textmsg/info.go b/gpbackman/textmsg/info.go new file mode 100644 index 00000000..8ab403a6 --- /dev/null +++ b/gpbackman/textmsg/info.go @@ -0,0 +1,54 @@ +package textmsg + +import ( + "fmt" + "strings" +) + +func InfoTextBackupDeleteStart(backupName string) string { + return fmt.Sprintf("Start deleting backup %s", backupName) +} + +func InfoTextBackupAlreadyDeleted(backupName string) string { + return fmt.Sprintf("Backup %s has already been deleted", backupName) +} + +func InfoTextBackupStatus(backupName, backupStatus string) string { + return fmt.Sprintf("Backup %s has status: %s", backupName, backupStatus) +} + +func InfoTextBackupDeleteSuccess(backupName string) string { + return fmt.Sprintf("Backup %s successfully deleted", backupName) +} + +func InfoTextBackupDependenciesList(backupName string, list []string) string { + return fmt.Sprintf("Backup %s has dependent backups: %s", backupName, strings.Join(list, ", ")) +} + +func InfoTextBackupDeleteList(list []string) string { + return fmt.Sprintf("The following backups will be deleted: %s", strings.Join(list, ", ")) +} + +func InfoTextBackupDeleteListFromHistory(list []string) string { + return fmt.Sprintf("The following backups will be deleted from history: %s", strings.Join(list, ", ")) +} + +func InfoTextCommandExecution(list ...string) string { + return fmt.Sprintf("Executing command: %s", strings.Join(list, " ")) +} + +func InfoTextCommandExecutionSucceeded(list ...string) string { + return fmt.Sprintf("Command succeeded: %s", strings.Join(list, " ")) +} + +func InfoTextBackupDirPath(backupDir string) string { + return 
fmt.Sprintf("Path to backup directory: %s", backupDir) +} + +func InfoTextSegmentPrefix(segPrefix string) string { + return fmt.Sprintf("Segment Prefix: %s", segPrefix) +} + +func InfoTextNothingToDo() string { + return "Nothing to do" +} diff --git a/gpbackman/textmsg/info_test.go b/gpbackman/textmsg/info_test.go new file mode 100644 index 00000000..03e38214 --- /dev/null +++ b/gpbackman/textmsg/info_test.go @@ -0,0 +1,102 @@ +package textmsg + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("info tests", func() { + Describe("info text functions with one arg", func() { + It("returns correct info text", func() { + tests := []struct { + name string + value string + function func(string) string + want string + }{ + {"InfoTextBackupDeleteStart", "TestBackup", InfoTextBackupDeleteStart, "Start deleting backup TestBackup"}, + {"InfoTextBackupDeleteSuccess", "TestBackup", InfoTextBackupDeleteSuccess, "Backup TestBackup successfully deleted"}, + {"InfoTextBackupAlreadyDeleted", "TestBackup", InfoTextBackupAlreadyDeleted, "Backup TestBackup has already been deleted"}, + {"InfoTextBackupDirPath", "/test/path", InfoTextBackupDirPath, "Path to backup directory: /test/path"}, + {"InfoTextSegmentPrefix", "TestValue", InfoTextSegmentPrefix, "Segment Prefix: TestValue"}, + } + for _, tt := range tests { + Expect(tt.function(tt.value)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("info text functions with two args", func() { + It("returns correct info text", func() { + tests := []struct { + name string + value1 string + value2 string + function func(string, string) string + want string + }{ + {"InfoTextBackupStatus", "TestBackup", "In Progress", InfoTextBackupStatus, "Backup TestBackup has status: In Progress"}, + } + for _, tt := range tests { + Expect(tt.function(tt.value1, tt.value2)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("info text functions with multiple args", func() { + It("returns correct info text", 
func() { + tests := []struct { + name string + value string + valueList []string + function func(string, []string) string + want string + }{ + {"InfoTextBackupDependenciesList", "TestBackup1", []string{"TestBackup2", "TestBackup3"}, InfoTextBackupDependenciesList, "Backup TestBackup1 has dependent backups: TestBackup2, TestBackup3"}, + } + for _, tt := range tests { + Expect(tt.function(tt.value, tt.valueList)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("info text functions with multiple separate args", func() { + It("returns correct info text", func() { + tests := []struct { + name string + values []string + function func(...string) string + want string + }{ + {"InfoTextCommandExecution", []string{"execution_command", "some_argument"}, InfoTextCommandExecution, "Executing command: execution_command some_argument"}, + {"InfoTextCommandExecutionSucceeded", []string{"execution_command", "some_argument"}, InfoTextCommandExecutionSucceeded, "Command succeeded: execution_command some_argument"}, + } + for _, tt := range tests { + Expect(tt.function(tt.values...)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("info text functions with list args", func() { + It("returns correct info text", func() { + tests := []struct { + name string + values []string + function func([]string) string + want string + }{ + {"InfoTextBackupDeleteList", []string{"TestBackup1", "TestBackup2"}, InfoTextBackupDeleteList, "The following backups will be deleted: TestBackup1, TestBackup2"}, + {"InfoTextBackupDeleteListFromHistory", []string{"TestBackup1", "TestBackup2"}, InfoTextBackupDeleteListFromHistory, "The following backups will be deleted from history: TestBackup1, TestBackup2"}, + } + for _, tt := range tests { + Expect(tt.function(tt.values)).To(Equal(tt.want), tt.name) + } + }) + }) + + Describe("info text functions with no args", func() { + It("returns correct info text", func() { + Expect(InfoTextNothingToDo()).To(Equal("Nothing to do")) + }) + }) +}) diff --git 
a/gpbackman/textmsg/textmsg_suite_test.go b/gpbackman/textmsg/textmsg_suite_test.go new file mode 100644 index 00000000..c4affdcf --- /dev/null +++ b/gpbackman/textmsg/textmsg_suite_test.go @@ -0,0 +1,13 @@ +package textmsg + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestTextmsg(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Textmsg Suite") +} diff --git a/gpbackman/textmsg/warn.go b/gpbackman/textmsg/warn.go new file mode 100644 index 00000000..d5bd0c5e --- /dev/null +++ b/gpbackman/textmsg/warn.go @@ -0,0 +1,7 @@ +package textmsg + +import "fmt" + +func WarnTextBackupUnableGetReport(backupName string) string { + return fmt.Sprintf("Unable to get report for backup %s. Check if backup is active", backupName) +} diff --git a/gpbackman/textmsg/warn_test.go b/gpbackman/textmsg/warn_test.go new file mode 100644 index 00000000..b5f3ad68 --- /dev/null +++ b/gpbackman/textmsg/warn_test.go @@ -0,0 +1,24 @@ +package textmsg + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("warn tests", func() { + Describe("warn text functions with one arg", func() { + It("returns correct warn text", func() { + tests := []struct { + name string + value string + function func(string) string + want string + }{ + {"WarnTextBackupUnableGetReport", "TestBackup", WarnTextBackupUnableGetReport, "Unable to get report for backup TestBackup. Check if backup is active"}, + } + for _, tt := range tests { + Expect(tt.function(tt.value)).To(Equal(tt.want), tt.name) + } + }) + }) +})