From 4943c1aaf6de939437a345634452a884feb683ac Mon Sep 17 00:00:00 2001
From: Juan Manuel Parrilla Madrid
Date: Tue, 7 Apr 2026 22:51:57 +0200
Subject: [PATCH 1/3] docs: add HCPEtcdBackup implementation reference

Document the full HCPEtcdBackup integration including architecture,
backup/restore flows, configuration, credential handling, storage layout,
dependency chain (PRs #8139, #8010, #8017, #8040, #8114, enhancement #1945),
and troubleshooting guide.

Co-Authored-By: Claude Opus 4.6
Signed-off-by: Juan Manuel Parrilla Madrid
---
 .../HCPEtcdBackup/HCPEtcdBackup-implementation.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/references/HCPEtcdBackup/HCPEtcdBackup-implementation.md b/docs/references/HCPEtcdBackup/HCPEtcdBackup-implementation.md
index 8241943d4..69707c912 100644
--- a/docs/references/HCPEtcdBackup/HCPEtcdBackup-implementation.md
+++ b/docs/references/HCPEtcdBackup/HCPEtcdBackup-implementation.md
@@ -452,9 +452,9 @@ oc get hostedcluster <name> -n <namespace> \
  -o jsonpath='{.spec.etcd.managed.storage.restoreSnapshotURL}'
```

-The output should be an array containing the snapshot URL, e.g.:
-```
-["s3://<bucket>/<prefix>/backups/hcp-aws-backup/etcd-backup/1775589976.db"]
+The output should be an array containing a **pre-signed HTTPS URL**, e.g.:
+```json
+["https://my-bucket.s3.us-east-1.amazonaws.com/path/to/snapshot.db?X-Amz-Algorithm=AWS4-HMAC-SHA256&..."]
 ```

### Manual restoreSnapshotURL Injection

From 0512695e79b8703002ca45949a97e4828dbe2768 Mon Sep 17 00:00:00 2001
From: Juan Manuel Parrilla Madrid
Date: Thu, 9 Apr 2026 15:45:56 +0200
Subject: [PATCH 2/3] feat(restore): inject pre-signed etcd snapshot URL
 during OADP restore

During restore, read the etcd snapshot S3 URL from the
hypershift.openshift.io/etcd-snapshot-url annotation (persisted by the backup
plugin since Velero strips status fields), convert it to a pre-signed HTTPS
GET URL using AWS SigV4, and inject it into
spec.etcd.managed.storage.restoreSnapshotURL so the etcd-init container can
download the snapshot via curl.

The s3presign package implements SigV4 signing using only the Go stdlib (no
AWS SDK dependency), supporting virtual-hosted/path-style addressing, custom
S3 endpoints, and STS session tokens.
Co-Authored-By: Claude Opus 4.6
Signed-off-by: Juan Manuel Parrilla Madrid
---
 .../HCPEtcdBackup-implementation.md           |  41 +-
 pkg/common/utils.go                           |  19 +-
 pkg/core/restore.go                           |  92 ++++
 pkg/core/restore_test.go                      | 169 +++++++
 pkg/s3presign/presign.go                      | 262 +++++++++++
 pkg/s3presign/presign_test.go                 | 425 ++++++++++++++++++
 .../s3presign/presign_integration_test.go     | 361 +++++++++++++++
 7 files changed, 1351 insertions(+), 18 deletions(-)
 create mode 100644 pkg/core/restore_test.go
 create mode 100644 pkg/s3presign/presign.go
 create mode 100644 pkg/s3presign/presign_test.go
 create mode 100644 tests/integration/s3presign/presign_integration_test.go

diff --git a/docs/references/HCPEtcdBackup/HCPEtcdBackup-implementation.md b/docs/references/HCPEtcdBackup/HCPEtcdBackup-implementation.md
index 69707c912..7aff5b72e 100644
--- a/docs/references/HCPEtcdBackup/HCPEtcdBackup-implementation.md
+++ b/docs/references/HCPEtcdBackup/HCPEtcdBackup-implementation.md
@@ -73,7 +73,8 @@ The plugin supports two mutually exclusive etcd backup methods, controlled by th
|---|---|
| `pkg/etcdbackup/orchestrator.go` | Core orchestration: BSL mapping, CR creation, polling, credential copy |
| `pkg/core/backup.go` | Backup plugin: etcd backup method routing, pod/PVC exclusion |
-| `pkg/core/restore.go` | Restore plugin: snapshotURL injection into HostedCluster spec |
+| `pkg/core/restore.go` | Restore plugin: annotation-based snapshotURL reading, S3 pre-signing, injection into HostedCluster spec |
+| `pkg/s3presign/presign.go` | Pure-stdlib AWS SigV4 pre-signed URL generation (no AWS SDK dependency) |
| `pkg/common/types.go` | Shared constants for backup methods, annotations, volume names |
| `pkg/common/scheme.go` | Scheme registration including apiextensionsv1 for CRD checks |

@@ -116,9 +117,13 @@ The `Execute()` method is called once per backed-up item, with no guaranteed ord

1. When the `HostedCluster` item is processed during restore, the plugin reads the etcd snapshot URL from the annotation `hypershift.openshift.io/etcd-snapshot-url`. This annotation is set during backup because Velero strips status fields from items during restore.

-2. If the URL is present and the HC has managed etcd (`spec.etcd.managed != nil`), the plugin injects the URL into `spec.etcd.managed.storage.restoreSnapshotURL`.
+2. If the annotation is present and the HC has managed etcd (`spec.etcd.managed != nil`), the plugin proceeds with injection; otherwise the HostedCluster passes through unchanged.

-3. The modified HC is written back to Velero's output, so when the HC is created in the target cluster, the HyperShift Operator uses the snapshot URL to restore etcd from the snapshot.
+3. If the URL uses the `s3://` scheme, the plugin converts it to a **pre-signed HTTPS GET URL** using AWS SigV4. This is required because the `etcd-init` container downloads the snapshot using `curl`, which cannot handle S3-native URLs. The pre-signed URL is generated using credentials from the Velero BackupStorageLocation (BSL) credential Secret.
+
+4. The plugin injects the pre-signed URL into `spec.etcd.managed.storage.restoreSnapshotURL` on the HostedCluster.
+
+5. The modified HC is written back to Velero's output. When the HC is created in the target cluster, the HyperShift Operator configures the etcd StatefulSet with an init container that downloads and restores the snapshot.

### Why an Annotation Instead of Status

@@ -126,6 +131,17 @@
Velero strips the `status` subresource from items during restore. The backup plugin therefore persists the snapshot URL in the `hypershift.openshift.io/etcd-snapshot-url` annotation, which survives the restore.
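+
+For reference, a backed-up HostedCluster carries the annotation roughly as follows (bucket, prefix, and snapshot name are illustrative, not literal plugin output):
+
+```yaml
+metadata:
+  annotations:
+    hypershift.openshift.io/etcd-snapshot-url: "s3://my-bucket/prefix/backups/hcp-aws-backup/etcd-backup/1775589976.db"
+```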

> **Note**: The `lastSuccessfulEtcdBackupURL` status field is also set via unstructured map access during backup for informational purposes, but the restore flow relies exclusively on the annotation.

+### S3 Pre-Signing
+
+The `s3presign` package (`pkg/s3presign/presign.go`) implements AWS SigV4 pre-signed URL generation using only the Go standard library (no AWS SDK dependency). It supports:
+
+- Virtual-hosted and path-style S3 addressing
+- Custom S3-compatible endpoints (MinIO, RHOCS, etc.)
+- STS session tokens
+- Configurable URL expiry (default: 1 hour)
+
+The credentials are read from the BSL credential Secret referenced in the Velero Backup's `spec.storageLocation`.
+
## Configuration

### Plugin ConfigMap
@@ -308,8 +324,7 @@ The overall design is defined in [Enhancement PR #1945](https://github.com/opens

Once both HyperShift PRs are merged, the vendor must be updated to:

-1. Replace `getLastSuccessfulEtcdBackupURL()` unstructured helper in `pkg/core/restore.go` with direct field access: `hc.Status.LastSuccessfulEtcdBackupURL`
-2. Remove local constants (`BackupInProgressReason`, `BackupRejectedReason`, `EtcdBackupSucceeded`) in `pkg/common/types.go` in favor of the API-defined constants
+1. Remove local constants (`BackupInProgressReason`, `BackupRejectedReason`, `EtcdBackupSucceeded`) in `pkg/common/types.go` in favor of the API-defined constants

## Manual Testing

@@ -409,7 +424,7 @@ aws s3 ls s3://<bucket>/<prefix>/backups/hcp-aws-backup/etcd-backup/

```bash
oc get hostedcluster <name> -n <namespace> \
-  -o jsonpath='{.status.lastSuccessfulEtcdBackupURL}'
+  -o jsonpath='{.metadata.annotations.hypershift\.openshift\.io/etcd-snapshot-url}'
```

### Testing the Restore Flow
@@ -457,21 +472,15 @@
The output should be an array containing a **pre-signed HTTPS URL**, e.g.:
```json
["https://my-bucket.s3.us-east-1.amazonaws.com/path/to/snapshot.db?X-Amz-Algorithm=AWS4-HMAC-SHA256&..."]
```

-### Manual restoreSnapshotURL Injection
+The restore plugin automatically converts the `s3://` URL stored in the annotation to a pre-signed HTTPS URL so the `etcd-init` container can download it via `curl`.

-To manually test the restore without going through the full OADP restore flow, you can patch the HostedCluster directly. Note that `restoreSnapshotURL` is an **array**, not a string:
+4. Verify the etcd-init container downloads and restores the snapshot:

```bash
-# Correct — array syntax
-oc patch hostedcluster <name> -n <namespace> --type=merge \
-  -p '{"spec":{"etcd":{"managed":{"storage":{"restoreSnapshotURL":["s3://<bucket>/<prefix>/etcd-backup/snapshot.db"]}}}}}'
-
-# Wrong — will be rejected by the API
-oc patch hostedcluster <name> -n <namespace> --type=merge \
-  -p '{"spec":{"etcd":{"managed":{"storage":{"restoreSnapshotURL":"s3://..."}}}}}'
+oc logs etcd-0 -c etcd-init -n <hcp-namespace>
```

> **Important**: The `restoreSnapshotURL` field only takes effect during HostedCluster bootstrap (initial etcd creation). The `etcd-init` container skips the restore if `/var/lib/data` is not empty. To test a full restore, the HostedCluster must be deleted and recreated via the OADP restore flow.

### Verifying volumeSnapshot Method is Unchanged diff --git a/pkg/common/utils.go b/pkg/common/utils.go index 96f48c224..c93a96319 100644 --- a/pkg/common/utils.go +++ b/pkg/common/utils.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "strings" + "sync" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/sirupsen/logrus" @@ -25,9 +26,23 @@ import ( ) var ( - k8sSAFilePath = DefaultK8sSAFilePath + k8sSAFilePath = DefaultK8sSAFilePath + k8sSAFilePathMu sync.RWMutex ) +// SetK8sSAFilePath overrides the service account file path (for testing). +func SetK8sSAFilePath(path string) { + k8sSAFilePathMu.Lock() + defer k8sSAFilePathMu.Unlock() + k8sSAFilePath = path +} + +func getK8sSAFilePath() string { + k8sSAFilePathMu.RLock() + defer k8sSAFilePathMu.RUnlock() + return k8sSAFilePath +} + func getMetadataAndAnnotations(item runtime.Unstructured) (metav1.Object, map[string]string, error) { metadata, err := meta.Accessor(item) if err != nil { @@ -73,7 +88,7 @@ func GetConfig() (*rest.Config, error) { // "/var/run/secrets/kubernetes.io/serviceaccount/namespace". If there is an error // reading the file, it returns an empty string and the error. func GetCurrentNamespace() (string, error) { - namespaceFilePath := filepath.Join(k8sSAFilePath, "namespace") + namespaceFilePath := filepath.Join(getK8sSAFilePath(), "namespace") namespace, err := os.ReadFile(namespaceFilePath) if err != nil { return "", err diff --git a/pkg/core/restore.go b/pkg/core/restore.go index 6ce01cb31..1dad8ce0c 100644 --- a/pkg/core/restore.go +++ b/pkg/core/restore.go @@ -4,11 +4,13 @@ import ( "context" "fmt" "slices" + "strings" hive "github.com/openshift/hive/apis/hive/v1" common "github.com/openshift/hypershift-oadp-plugin/pkg/common" plugtypes "github.com/openshift/hypershift-oadp-plugin/pkg/core/types" validation "github.com/openshift/hypershift-oadp-plugin/pkg/core/validation" + "github.com/openshift/hypershift-oadp-plugin/pkg/s3presign" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/sirupsen/logrus" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -175,6 +177,37 @@ func (p *RestorePlugin) Execute(input *velero.RestoreItemActionExecuteInput) (*v } common.AddAnnotation(metadata, common.HostedClusterRestoredFromBackupAnnotation, "") p.log.Infof("Added restore annotation to HostedCluster %s", metadata.GetName()) + + // Inject restoreSnapshotURL if etcd backup URL is available. + // Read from annotation because Velero strips status during restore. 
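+	// Annotation values that already carry an https:// URL (pre-signed out
+	// of band) are injected as-is below; only s3:// URLs go through the
+	// pre-signing conversion.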
+		annotations := metadata.GetAnnotations()
+		snapshotURL := annotations[common.EtcdSnapshotURLAnnotation]
+		if snapshotURL != "" {
+			// Convert s3:// to pre-signed HTTPS URL
+			if strings.HasPrefix(snapshotURL, "s3://") {
+				presigned, err := p.presignS3URL(ctx, backup, snapshotURL)
+				if err != nil {
+					return nil, fmt.Errorf("error generating pre-signed URL for etcd snapshot: %v", err)
+				}
+				p.log.Infof("Converted s3:// URL to pre-signed HTTPS URL for HostedCluster restore")
+				snapshotURL = presigned
+			}
+
+			hc := &hyperv1.HostedCluster{}
+			if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), hc); err != nil {
+				return nil, fmt.Errorf("error converting item to HostedCluster: %v", err)
+			}
+			if hc.Spec.Etcd.Managed != nil {
+				hc.Spec.Etcd.Managed.Storage.RestoreSnapshotURL = []string{snapshotURL}
+				p.log.Infof("Injected restoreSnapshotURL into HostedCluster %s", hc.Name)
+
+				unstructuredHC, err := runtime.DefaultUnstructuredConverter.ToUnstructured(hc)
+				if err != nil {
+					return nil, fmt.Errorf("error converting HostedCluster to unstructured: %v", err)
+				}
+				input.Item.SetUnstructuredContent(unstructuredHC)
+			}
+		}
	}

	case kind == common.ClusterDeploymentKind:
@@ -194,3 +227,62 @@ func (p *RestorePlugin) Execute(input *velero.RestoreItemActionExecuteInput) (*v

	return velero.NewRestoreItemActionExecuteOutput(input.Item), nil
}
+
+// presignS3URL converts an s3:// URL into a pre-signed HTTPS GET URL using
+// credentials from the Velero BackupStorageLocation.
+func (p *RestorePlugin) presignS3URL(ctx context.Context, backup *velerov1api.Backup, s3URL string) (string, error) {
+	bucket, key, err := s3presign.ParseS3URL(s3URL)
+	if err != nil {
+		return "", fmt.Errorf("error parsing S3 URL %q: %w", s3URL, err)
+	}
+
+	// Fetch BSL
+	bsl := &velerov1api.BackupStorageLocation{}
+	oadpNS, err := common.GetCurrentNamespace()
+	if err != nil {
+		return "", fmt.Errorf("error getting current namespace: %w", err)
+	}
+
+	bslName := backup.Spec.StorageLocation
+	if err := p.client.Get(ctx, types.NamespacedName{Name: bslName, Namespace: oadpNS}, bsl); err != nil {
+		return "", fmt.Errorf("error getting BackupStorageLocation %q: %w", bslName, err)
+	}
+
+	// Read BSL config
+	region := bsl.Spec.Config["region"]
+	endpoint := bsl.Spec.Config["s3Url"]
+	forcePathStyle := bsl.Spec.Config["s3ForcePathStyle"] == "true"
+
+	// Read BSL credential Secret
+	if bsl.Spec.Credential == nil {
+		return "", fmt.Errorf("BSL %q has no credential reference", bsl.Name)
+	}
+
+	secret := &corev1.Secret{}
+	if err := p.client.Get(ctx, types.NamespacedName{Name: bsl.Spec.Credential.Name, Namespace: oadpNS}, secret); err != nil {
+		return "", fmt.Errorf("error getting BSL credential secret %q: %w", bsl.Spec.Credential.Name, err)
+	}
+
+	credData, ok := secret.Data[bsl.Spec.Credential.Key]
+	if !ok {
+		return "", fmt.Errorf("key %q not found in secret %q", bsl.Spec.Credential.Key, bsl.Spec.Credential.Name)
+	}
+
+	creds, err := s3presign.ParseAWSCredentials(credData, "default")
+	if err != nil {
+		return "", fmt.Errorf("error parsing AWS credentials: %w", err)
+	}
+
+	return s3presign.GeneratePresignedGetURL(s3presign.PresignOptions{
+		Bucket:          bucket,
+		Key:             key,
+		Region:          region,
+		AccessKeyID:     creds.AccessKeyID,
+		SecretAccessKey: creds.SecretAccessKey,
+		SessionToken:    creds.SessionToken,
+		Expiry:          s3presign.DefaultPresignExpiry,
+		Endpoint:        endpoint,
+		ForcePathStyle:  forcePathStyle,
+	})
+}

diff --git a/pkg/core/restore_test.go b/pkg/core/restore_test.go
new file mode 100644
index 
000000000..553f2b43b --- /dev/null +++ b/pkg/core/restore_test.go @@ -0,0 +1,169 @@ +package core + +import ( + "context" + "net/url" + "os" + "testing" + + common "github.com/openshift/hypershift-oadp-plugin/pkg/common" + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/sirupsen/logrus" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestPresignS3URL(t *testing.T) { + scheme := runtime.NewScheme() + _ = hyperv1.AddToScheme(scheme) + _ = velerov1api.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + + credentialData := []byte(`[default] +aws_access_key_id = AKIAIOSFODNN7EXAMPLE +aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +`) + + bsl := &velerov1api.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "openshift-adp", + }, + Spec: velerov1api.BackupStorageLocationSpec{ + Config: map[string]string{ + "region": "us-east-1", + }, + Credential: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "cloud-credentials", + }, + Key: "cloud", + }, + }, + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cloud-credentials", + Namespace: "openshift-adp", + }, + Data: map[string][]byte{ + "cloud": credentialData, + }, + } + + backup := &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-backup", + Namespace: "openshift-adp", + }, + Spec: velerov1api.BackupSpec{ + StorageLocation: "default", + }, + } + + // Override namespace resolution for tests + origSAPath := common.DefaultK8sSAFilePath + nsDir := t.TempDir() + if err := os.WriteFile(nsDir+"/namespace", []byte("openshift-adp"), 0644); err != nil { + t.Fatalf("failed to write namespace file: %v", err) + } + common.SetK8sSAFilePath(nsDir) + t.Cleanup(func() { common.SetK8sSAFilePath(origSAPath) }) + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(bsl, secret). 
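+		// the fake client must serve both the BSL and the credential Secret
+		// that presignS3URL looks up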
+ Build() + + plugin := &RestorePlugin{ + log: logrus.New(), + ctx: context.Background(), + client: fakeClient, + } + + t.Run("valid s3 URL produces pre-signed HTTPS URL", func(t *testing.T) { + result, err := plugin.presignS3URL(context.Background(), backup, "s3://my-bucket/path/to/snapshot.db") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if result == "" { + t.Fatal("expected non-empty URL") + } + + parsed, err := url.Parse(result) + if err != nil { + t.Fatalf("failed to parse result URL: %v", err) + } + + if parsed.Scheme != "https" { + t.Errorf("expected https scheme, got %s", parsed.Scheme) + } + + requiredParams := []string{ + "X-Amz-Algorithm", + "X-Amz-Credential", + "X-Amz-Date", + "X-Amz-Expires", + "X-Amz-Signature", + "X-Amz-SignedHeaders", + } + for _, p := range requiredParams { + if parsed.Query().Get(p) == "" { + t.Errorf("missing required query param %s", p) + } + } + }) + + t.Run("invalid s3 URL returns error", func(t *testing.T) { + _, err := plugin.presignS3URL(context.Background(), backup, "not-an-s3-url") + if err == nil { + t.Error("expected error for invalid S3 URL") + } + }) + + t.Run("BSL not found returns error", func(t *testing.T) { + badBackup := backup.DeepCopy() + badBackup.Spec.StorageLocation = "nonexistent" + + _, err := plugin.presignS3URL(context.Background(), badBackup, "s3://bucket/key") + if err == nil { + t.Error("expected error for nonexistent BSL") + } + }) + + t.Run("custom endpoint uses endpoint URL", func(t *testing.T) { + bslCustom := bsl.DeepCopy() + bslCustom.Name = "minio-bsl" + bslCustom.Spec.Config["s3Url"] = "https://minio.example.com" + bslCustom.Spec.Config["s3ForcePathStyle"] = "true" + + fakeClientCustom := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(bslCustom, secret). + Build() + + pluginCustom := &RestorePlugin{ + log: logrus.New(), + ctx: context.Background(), + client: fakeClientCustom, + } + + backupCustom := backup.DeepCopy() + backupCustom.Spec.StorageLocation = "minio-bsl" + + result, err := pluginCustom.presignS3URL(context.Background(), backupCustom, "s3://bucket/key") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + parsed, _ := url.Parse(result) + if parsed.Host != "minio.example.com" { + t.Errorf("expected minio.example.com host, got %s", parsed.Host) + } + }) +} diff --git a/pkg/s3presign/presign.go b/pkg/s3presign/presign.go new file mode 100644 index 000000000..aef2e9409 --- /dev/null +++ b/pkg/s3presign/presign.go @@ -0,0 +1,262 @@ +package s3presign + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "net/url" + "sort" + "strings" + "time" +) + +const DefaultPresignExpiry = 1 * time.Hour + +// nowFunc is overridden in tests for deterministic output. +var nowFunc = time.Now + +// PresignOptions holds all parameters needed to generate a pre-signed S3 GET URL. +type PresignOptions struct { + Bucket string + Key string + Region string + AccessKeyID string + SecretAccessKey string + SessionToken string // optional, for STS + Expiry time.Duration // URL validity duration + Endpoint string // custom S3 endpoint (e.g. MinIO, RHOCS) + ForcePathStyle bool // use path-style addressing +} + +// AWSCredentials holds parsed AWS credential values. +type AWSCredentials struct { + AccessKeyID string + SecretAccessKey string + SessionToken string +} + +// ParseS3URL parses "s3://bucket/path/to/key" into bucket and key components. 
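+//
+// A minimal usage sketch (illustrative values):
+//
+//	bucket, key, err := ParseS3URL("s3://my-bucket/backups/etcd/snapshot.db")
+//	// bucket == "my-bucket", key == "backups/etcd/snapshot.db", err == nil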
+func ParseS3URL(rawURL string) (bucket, key string, err error) { + if rawURL == "" { + return "", "", fmt.Errorf("empty S3 URL") + } + + u, err := url.Parse(rawURL) + if err != nil { + return "", "", fmt.Errorf("invalid S3 URL %q: %w", rawURL, err) + } + + if u.Scheme != "s3" { + return "", "", fmt.Errorf("expected s3:// scheme, got %q", u.Scheme) + } + + bucket = u.Host + if bucket == "" { + return "", "", fmt.Errorf("missing bucket in S3 URL %q", rawURL) + } + + // u.Path includes leading slash, trim it + key = strings.TrimPrefix(u.Path, "/") + if key == "" { + return "", "", fmt.Errorf("missing key in S3 URL %q", rawURL) + } + + return bucket, key, nil +} + +// ParseAWSCredentials parses an AWS shared credentials file (INI format) +// and returns credentials for the given profile. +func ParseAWSCredentials(data []byte, profile string) (*AWSCredentials, error) { + if profile == "" { + profile = "default" + } + + lines := strings.Split(string(data), "\n") + var inProfile bool + creds := &AWSCredentials{} + + target := fmt.Sprintf("[%s]", profile) + + for _, line := range lines { + line = strings.TrimSpace(line) + + // skip empty lines and comments + if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") { + continue + } + + // section header + if strings.HasPrefix(line, "[") { + inProfile = line == target + continue + } + + if !inProfile { + continue + } + + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { + continue + } + + k := strings.TrimSpace(parts[0]) + v := strings.TrimSpace(parts[1]) + + switch k { + case "aws_access_key_id": + creds.AccessKeyID = v + case "aws_secret_access_key": + creds.SecretAccessKey = v + case "aws_session_token": + creds.SessionToken = v + } + } + + if creds.AccessKeyID == "" { + return nil, fmt.Errorf("aws_access_key_id not found for profile %q", profile) + } + if creds.SecretAccessKey == "" { + return nil, fmt.Errorf("aws_secret_access_key not found for profile %q", profile) + } + + return creds, nil +} + +// GeneratePresignedGetURL creates a pre-signed HTTPS GET URL using AWS SigV4. 
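+//
+// A typical call looks like this (illustrative values; the STS token and
+// endpoint fields are optional):
+//
+//	rawURL, err := GeneratePresignedGetURL(PresignOptions{
+//		Bucket:          "my-bucket",
+//		Key:             "etcd/snapshot.db",
+//		Region:          "us-east-1",
+//		AccessKeyID:     "AKIA...",
+//		SecretAccessKey: "...",
+//		Expiry:          time.Hour,
+//	})
+//
+// The signing key is derived by chaining HMAC-SHA256 over the date, region,
+// service, and the literal "aws4_request", as the helpers below implement.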
+func GeneratePresignedGetURL(opts PresignOptions) (string, error) { + if opts.Bucket == "" || opts.Key == "" || opts.Region == "" { + return "", fmt.Errorf("bucket, key, and region are required") + } + if opts.AccessKeyID == "" || opts.SecretAccessKey == "" { + return "", fmt.Errorf("access key ID and secret access key are required") + } + + now := nowFunc().UTC() + datestamp := now.Format("20060102") + amzDate := now.Format("20060102T150405Z") + expirySeconds := int(opts.Expiry.Seconds()) + if expirySeconds <= 0 { + expirySeconds = int(DefaultPresignExpiry.Seconds()) + } + + // Build the endpoint URL + var host, urlPath string + if opts.Endpoint != "" { + ep := strings.TrimRight(opts.Endpoint, "/") + parsed, err := url.Parse(ep) + if err != nil { + return "", fmt.Errorf("invalid endpoint %q: %w", opts.Endpoint, err) + } + host = parsed.Host + urlPath = "/" + opts.Bucket + "/" + opts.Key + } else if opts.ForcePathStyle { + host = fmt.Sprintf("s3.%s.amazonaws.com", opts.Region) + urlPath = "/" + opts.Bucket + "/" + opts.Key + } else { + host = fmt.Sprintf("%s.s3.%s.amazonaws.com", opts.Bucket, opts.Region) + urlPath = "/" + opts.Key + } + + credential := fmt.Sprintf("%s/%s/%s/s3/aws4_request", opts.AccessKeyID, datestamp, opts.Region) + + // Build query parameters + params := url.Values{} + params.Set("X-Amz-Algorithm", "AWS4-HMAC-SHA256") + params.Set("X-Amz-Credential", credential) + params.Set("X-Amz-Date", amzDate) + params.Set("X-Amz-Expires", fmt.Sprintf("%d", expirySeconds)) + params.Set("X-Amz-SignedHeaders", "host") + if opts.SessionToken != "" { + params.Set("X-Amz-Security-Token", opts.SessionToken) + } + + // Canonical query string (sorted) + canonicalQueryString := sortedQueryString(params) + + // Canonical request + canonicalRequest := strings.Join([]string{ + "GET", + uriEncode(urlPath), + canonicalQueryString, + "host:" + host + "\n", + "host", + "UNSIGNED-PAYLOAD", + }, "\n") + + // String to sign + scope := fmt.Sprintf("%s/%s/s3/aws4_request", datestamp, opts.Region) + stringToSign := strings.Join([]string{ + "AWS4-HMAC-SHA256", + amzDate, + scope, + sha256Hex([]byte(canonicalRequest)), + }, "\n") + + // Signing key + signingKey := deriveSigningKey(opts.SecretAccessKey, datestamp, opts.Region, "s3") + + // Signature + signature := hex.EncodeToString(hmacSHA256(signingKey, []byte(stringToSign))) + + params.Set("X-Amz-Signature", signature) + + scheme := "https" + if opts.Endpoint != "" { + if parsed, err := url.Parse(opts.Endpoint); err == nil && parsed.Scheme != "" { + scheme = parsed.Scheme + } + } + + presignedURL := fmt.Sprintf("%s://%s%s?%s", scheme, host, uriEncode(urlPath), sortedQueryString(params)) + return presignedURL, nil +} + +func deriveSigningKey(secret, datestamp, region, service string) []byte { + kDate := hmacSHA256([]byte("AWS4"+secret), []byte(datestamp)) + kRegion := hmacSHA256(kDate, []byte(region)) + kService := hmacSHA256(kRegion, []byte(service)) + kSigning := hmacSHA256(kService, []byte("aws4_request")) + return kSigning +} + +func hmacSHA256(key, data []byte) []byte { + h := hmac.New(sha256.New, key) + h.Write(data) + return h.Sum(nil) +} + +func sha256Hex(data []byte) string { + h := sha256.Sum256(data) + return hex.EncodeToString(h[:]) +} + +func sortedQueryString(params url.Values) string { + keys := make([]string, 0, len(params)) + for k := range params { + keys = append(keys, k) + } + sort.Strings(keys) + + var parts []string + for _, k := range keys { + parts = append(parts, url.QueryEscape(k)+"="+url.QueryEscape(params.Get(k))) + } + 
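+	// Both the canonical query string (computed before X-Amz-Signature is
+	// added) and the final URL query are produced by this helper, so the
+	// escaping that gets signed is exactly the escaping the URL carries.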
return strings.Join(parts, "&") +} + +// uriEncode performs URI encoding per AWS SigV4 spec, preserving forward slashes. +// Uses url.QueryEscape (which encodes all reserved chars) and adjusts for SigV4: +// - '+' (space) → '%20' (SigV4 requires percent-encoding, not '+') +// - '%7E' → '~' (SigV4 treats '~' as unreserved) +func uriEncode(path string) string { + segments := strings.Split(path, "/") + for i, seg := range segments { + encoded := url.QueryEscape(seg) + encoded = strings.ReplaceAll(encoded, "+", "%20") + encoded = strings.ReplaceAll(encoded, "%7E", "~") + segments[i] = encoded + } + return strings.Join(segments, "/") +} diff --git a/pkg/s3presign/presign_test.go b/pkg/s3presign/presign_test.go new file mode 100644 index 000000000..7b4f1f590 --- /dev/null +++ b/pkg/s3presign/presign_test.go @@ -0,0 +1,425 @@ +package s3presign + +import ( + "net/url" + "strings" + "testing" + "time" +) + +func TestParseS3URL(t *testing.T) { + tests := []struct { + name string + rawURL string + wantBucket string + wantKey string + wantErr bool + }{ + { + name: "valid URL with nested key", + rawURL: "s3://my-bucket/path/to/snapshot.db", + wantBucket: "my-bucket", + wantKey: "path/to/snapshot.db", + }, + { + name: "valid URL with simple key", + rawURL: "s3://bucket/key", + wantBucket: "bucket", + wantKey: "key", + }, + { + name: "missing key - bucket only", + rawURL: "s3://my-bucket", + wantErr: true, + }, + { + name: "missing key - trailing slash", + rawURL: "s3://my-bucket/", + wantErr: true, + }, + { + name: "wrong scheme", + rawURL: "https://bucket/key", + wantErr: true, + }, + { + name: "empty URL", + rawURL: "", + wantErr: true, + }, + { + name: "special characters in key", + rawURL: "s3://bucket/path/to/snap%20shot.db", + wantBucket: "bucket", + wantKey: "path/to/snap shot.db", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + bucket, key, err := ParseS3URL(tt.rawURL) + if (err != nil) != tt.wantErr { + t.Errorf("ParseS3URL() error = %v, wantErr %v", err, tt.wantErr) + return + } + if bucket != tt.wantBucket { + t.Errorf("ParseS3URL() bucket = %q, want %q", bucket, tt.wantBucket) + } + if key != tt.wantKey { + t.Errorf("ParseS3URL() key = %q, want %q", key, tt.wantKey) + } + }) + } +} + +func TestParseAWSCredentials(t *testing.T) { + tests := []struct { + name string + data string + profile string + wantAccessKey string + wantSecretKey string + wantToken string + wantErr bool + }{ + { + name: "standard default profile", + data: `[default] +aws_access_key_id = AKIAIOSFODNN7EXAMPLE +aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +`, + profile: "default", + wantAccessKey: "AKIAIOSFODNN7EXAMPLE", + wantSecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + }, + { + name: "profile with session token", + data: `[default] +aws_access_key_id = AKIAIOSFODNN7EXAMPLE +aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +aws_session_token = FwoGZXIvYXdzEBYaDHqa0AP +`, + profile: "default", + wantAccessKey: "AKIAIOSFODNN7EXAMPLE", + wantSecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + wantToken: "FwoGZXIvYXdzEBYaDHqa0AP", + }, + { + name: "multiple profiles - select non-default", + data: `[default] +aws_access_key_id = DEFAULT_KEY +aws_secret_access_key = DEFAULT_SECRET + +[production] +aws_access_key_id = PROD_KEY +aws_secret_access_key = PROD_SECRET +`, + profile: "production", + wantAccessKey: "PROD_KEY", + wantSecretKey: "PROD_SECRET", + }, + { + name: "comments and blank lines", + data: `# This is a comment +; This is also a 
comment + +[default] + aws_access_key_id = AKID + aws_secret_access_key = SECRET +`, + profile: "default", + wantAccessKey: "AKID", + wantSecretKey: "SECRET", + }, + { + name: "missing profile", + data: `[default] +aws_access_key_id = AKID +aws_secret_access_key = SECRET +`, + profile: "nonexistent", + wantErr: true, + }, + { + name: "missing access key", + data: `[default] +aws_secret_access_key = SECRET +`, + profile: "default", + wantErr: true, + }, + { + name: "missing secret key", + data: `[default] +aws_access_key_id = AKID +`, + profile: "default", + wantErr: true, + }, + { + name: "empty profile defaults to default", + data: "[default]\naws_access_key_id=KEY\naws_secret_access_key=SECRET\n", + profile: "", + wantAccessKey: "KEY", + wantSecretKey: "SECRET", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + creds, err := ParseAWSCredentials([]byte(tt.data), tt.profile) + if (err != nil) != tt.wantErr { + t.Errorf("ParseAWSCredentials() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.wantErr { + return + } + if creds.AccessKeyID != tt.wantAccessKey { + t.Errorf("AccessKeyID = %q, want %q", creds.AccessKeyID, tt.wantAccessKey) + } + if creds.SecretAccessKey != tt.wantSecretKey { + t.Errorf("SecretAccessKey = %q, want %q", creds.SecretAccessKey, tt.wantSecretKey) + } + if creds.SessionToken != tt.wantToken { + t.Errorf("SessionToken = %q, want %q", creds.SessionToken, tt.wantToken) + } + }) + } +} + +func TestUriEncode(t *testing.T) { + tests := []struct { + name string + input string + want string + }{ + { + name: "when input has unreserved chars, it should not encode them", + input: "/path/AZaz09-_.~", + want: "/path/AZaz09-_.~", + }, + { + name: "when input has spaces, it should percent-encode them not use plus", + input: "/path/snap shot.db", + want: "/path/snap%20shot.db", + }, + { + name: "when input has reserved chars $ & + : @, it should percent-encode them", + input: "/key$with&special+chars:here@now", + want: "/key%24with%26special%2Bchars%3Ahere%40now", + }, + { + name: "when input has hash and question mark, it should percent-encode them", + input: "/path/file#1?v=2", + want: "/path/file%231%3Fv%3D2", + }, + { + name: "when input has forward slashes, it should preserve them", + input: "/a/b/c/d", + want: "/a/b/c/d", + }, + { + name: "when input has equals and semicolon, it should percent-encode them", + input: "/key=val;other", + want: "/key%3Dval%3Bother", + }, + { + name: "when input has a percent sign, it should encode it as %25", + input: "/100%done", + want: "/100%25done", + }, + { + name: "when input has unicode characters, it should percent-encode each byte", + input: "/datos/café.db", + want: "/datos/caf%C3%A9.db", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := uriEncode(tt.input) + if got != tt.want { + t.Errorf("uriEncode(%q) = %q, want %q", tt.input, got, tt.want) + } + }) + } +} + +func TestGeneratePresignedGetURL(t *testing.T) { + // Fix time for deterministic tests + fixedTime := time.Date(2026, 4, 8, 12, 0, 0, 0, time.UTC) + origNow := nowFunc + nowFunc = func() time.Time { return fixedTime } + defer func() { nowFunc = origNow }() + + baseOpts := PresignOptions{ + Bucket: "my-bucket", + Key: "path/to/snapshot.db", + Region: "us-east-1", + AccessKeyID: "AKIAIOSFODNN7EXAMPLE", + SecretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + Expiry: 1 * time.Hour, + } + + t.Run("basic virtual-hosted style URL", func(t *testing.T) { + result, err := 
GeneratePresignedGetURL(baseOpts) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.HasPrefix(result, "https://") { + t.Errorf("URL should start with https://, got %s", result) + } + + parsed, err := url.Parse(result) + if err != nil { + t.Fatalf("failed to parse result URL: %v", err) + } + + if parsed.Host != "my-bucket.s3.us-east-1.amazonaws.com" { + t.Errorf("unexpected host: %s", parsed.Host) + } + + requiredParams := []string{ + "X-Amz-Algorithm", + "X-Amz-Credential", + "X-Amz-Date", + "X-Amz-Expires", + "X-Amz-Signature", + "X-Amz-SignedHeaders", + } + for _, p := range requiredParams { + if parsed.Query().Get(p) == "" { + t.Errorf("missing required query param %s", p) + } + } + + if parsed.Query().Get("X-Amz-Algorithm") != "AWS4-HMAC-SHA256" { + t.Errorf("unexpected algorithm: %s", parsed.Query().Get("X-Amz-Algorithm")) + } + if parsed.Query().Get("X-Amz-Expires") != "3600" { + t.Errorf("unexpected expires: %s", parsed.Query().Get("X-Amz-Expires")) + } + }) + + t.Run("deterministic output", func(t *testing.T) { + result1, _ := GeneratePresignedGetURL(baseOpts) + result2, _ := GeneratePresignedGetURL(baseOpts) + if result1 != result2 { + t.Errorf("same inputs should produce same output") + } + }) + + t.Run("session token adds security token param", func(t *testing.T) { + opts := baseOpts + opts.SessionToken = "FwoGZXIvYXdzEBYaDHqa0AP" + + result, err := GeneratePresignedGetURL(opts) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + parsed, _ := url.Parse(result) + if parsed.Query().Get("X-Amz-Security-Token") == "" { + t.Error("expected X-Amz-Security-Token in URL") + } + }) + + t.Run("path-style addressing", func(t *testing.T) { + opts := baseOpts + opts.ForcePathStyle = true + + result, err := GeneratePresignedGetURL(opts) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + parsed, _ := url.Parse(result) + if parsed.Host != "s3.us-east-1.amazonaws.com" { + t.Errorf("path-style host should be s3.region.amazonaws.com, got %s", parsed.Host) + } + if !strings.HasPrefix(parsed.Path, "/my-bucket/") { + t.Errorf("path-style path should start with /bucket/, got %s", parsed.Path) + } + }) + + t.Run("custom endpoint", func(t *testing.T) { + opts := baseOpts + opts.Endpoint = "https://minio.example.com" + + result, err := GeneratePresignedGetURL(opts) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + parsed, _ := url.Parse(result) + if parsed.Host != "minio.example.com" { + t.Errorf("expected custom endpoint host, got %s", parsed.Host) + } + }) + + t.Run("When key has special characters, it should encode the path in the URL", func(t *testing.T) { + opts := baseOpts + opts.Key = "path/to/snap shot#1.db" + + result, err := GeneratePresignedGetURL(opts) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // The URL path should contain encoded characters, not raw ones + if strings.Contains(result, " ") { + t.Error("presigned URL should not contain raw spaces") + } + if strings.Contains(strings.SplitN(result, "?", 2)[0], "#") { + t.Error("presigned URL path should not contain raw # character") + } + // Verify URL is parseable and has all required params + parsed, err := url.Parse(result) + if err != nil { + t.Fatalf("failed to parse presigned URL: %v", err) + } + if parsed.Query().Get("X-Amz-Signature") == "" { + t.Error("missing X-Amz-Signature") + } + }) + + t.Run("When key has unicode characters, it should encode the path in the URL", func(t *testing.T) { + opts := baseOpts + opts.Key = 
"datos/café/snapshot.db" + + result, err := GeneratePresignedGetURL(opts) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + parsed, err := url.Parse(result) + if err != nil { + t.Fatalf("failed to parse presigned URL: %v", err) + } + if parsed.Query().Get("X-Amz-Signature") == "" { + t.Error("missing X-Amz-Signature") + } + // Path should contain percent-encoded bytes for é (C3 A9) + urlBeforeQuery := strings.SplitN(result, "?", 2)[0] + if !strings.Contains(urlBeforeQuery, "%C3%A9") { + t.Errorf("expected percent-encoded unicode in path, got: %s", urlBeforeQuery) + } + }) + + t.Run("missing required fields", func(t *testing.T) { + _, err := GeneratePresignedGetURL(PresignOptions{}) + if err == nil { + t.Error("expected error for empty options") + } + }) + + t.Run("no session token means no security token param", func(t *testing.T) { + result, _ := GeneratePresignedGetURL(baseOpts) + parsed, _ := url.Parse(result) + if parsed.Query().Get("X-Amz-Security-Token") != "" { + t.Error("should not have X-Amz-Security-Token without session token") + } + }) +} diff --git a/tests/integration/s3presign/presign_integration_test.go b/tests/integration/s3presign/presign_integration_test.go new file mode 100644 index 000000000..28cae6c50 --- /dev/null +++ b/tests/integration/s3presign/presign_integration_test.go @@ -0,0 +1,361 @@ +//go:build integration + +// Package s3presign_integration tests the full pre-signed URL flow against real AWS S3. +// +// The test is fully self-contained: it creates an ephemeral S3 bucket, uploads test +// objects, generates pre-signed URLs, verifies downloads, and tears everything down. +// +// Requirements: +// - AWS credentials at ~/.aws/credentials (or path in AWS_SHARED_CREDENTIALS_FILE) +// - aws CLI installed and in PATH +// - Environment variables (all optional): +// - S3_TEST_REGION: AWS region (default: us-east-1) +// - AWS_PROFILE: credentials profile (default: default) +// - AWS_SHARED_CREDENTIALS_FILE: path to credentials file (default: ~/.aws/credentials) +// +// Run: go test -tags integration -v -timeout 120s ./tests/integration/s3presign/ +package s3presign_integration + +import ( + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/openshift/hypershift-oadp-plugin/pkg/s3presign" +) + +const testObjectContent = "hypershift-oadp-plugin presign integration test data" + +type testEnv struct { + bucket string + region string + profile string + credFile string +} + +// setupEnv creates an ephemeral S3 bucket and returns the test environment. +// The bucket is deleted (with all objects) when the test finishes. 
+func setupEnv(t *testing.T) *testEnv { + t.Helper() + + // Check aws CLI is available + if _, err := exec.LookPath("aws"); err != nil { + t.Fatal("aws CLI not found in PATH — required for integration tests") + } + + region := os.Getenv("S3_TEST_REGION") + if region == "" { + region = "us-east-1" + } + + profile := os.Getenv("AWS_PROFILE") + if profile == "" { + profile = "default" + } + + credFile := os.Getenv("AWS_SHARED_CREDENTIALS_FILE") + if credFile == "" { + home, err := os.UserHomeDir() + if err != nil { + t.Fatalf("cannot determine home dir: %v", err) + } + credFile = filepath.Join(home, ".aws", "credentials") + } + + if _, err := os.Stat(credFile); err != nil { + t.Fatalf("credentials file %q not found: %v", credFile, err) + } + + // Create ephemeral bucket with unique name + bucket := fmt.Sprintf("hypershift-oadp-presign-test-%d", time.Now().UnixNano()) + + env := &testEnv{ + bucket: bucket, + region: region, + profile: profile, + credFile: credFile, + } + + env.createBucket(t) + t.Cleanup(func() { env.destroyBucket(t) }) + + return env +} + +func (e *testEnv) awsCmd(t *testing.T, args ...string) (string, error) { + t.Helper() + cmd := exec.Command("aws", args...) + cmd.Env = append(os.Environ(), + "AWS_SHARED_CREDENTIALS_FILE="+e.credFile, + "AWS_DEFAULT_REGION="+e.region, + "AWS_PROFILE="+e.profile, + ) + out, err := cmd.CombinedOutput() + return string(out), err +} + +func (e *testEnv) createBucket(t *testing.T) { + t.Helper() + + args := []string{"s3api", "create-bucket", "--bucket", e.bucket, "--region", e.region} + // us-east-1 doesn't accept LocationConstraint + if e.region != "us-east-1" { + args = append(args, "--create-bucket-configuration", fmt.Sprintf("LocationConstraint=%s", e.region)) + } + + out, err := e.awsCmd(t, args...) 
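+	// CombinedOutput from the aws CLI is surfaced on failure so the test log
+	// shows the underlying S3 error text.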
+ if err != nil { + t.Fatalf("failed to create bucket %s: %v\n%s", e.bucket, err, out) + } + t.Logf("Created ephemeral bucket: %s", e.bucket) +} + +func (e *testEnv) destroyBucket(t *testing.T) { + t.Helper() + + // Remove all objects first + out, err := e.awsCmd(t, "s3", "rm", fmt.Sprintf("s3://%s", e.bucket), "--recursive") + if err != nil { + t.Errorf("LEAKED: failed to empty bucket %s (manual cleanup required): %v\n%s", e.bucket, err, out) + } + + // Delete the bucket + out, err = e.awsCmd(t, "s3", "rb", fmt.Sprintf("s3://%s", e.bucket)) + if err != nil { + t.Errorf("LEAKED: failed to delete bucket %s (manual cleanup required): %v\n%s", e.bucket, err, out) + return + } + t.Logf("Destroyed ephemeral bucket: %s", e.bucket) +} + +func (e *testEnv) uploadObject(t *testing.T, key, content string) { + t.Helper() + + tmpFile := filepath.Join(t.TempDir(), "test-data") + if err := os.WriteFile(tmpFile, []byte(content), 0644); err != nil { + t.Fatalf("failed to write temp file: %v", err) + } + + s3URI := fmt.Sprintf("s3://%s/%s", e.bucket, key) + out, err := e.awsCmd(t, "s3", "cp", tmpFile, s3URI) + if err != nil { + t.Fatalf("failed to upload %s: %v\n%s", s3URI, err, out) + } + t.Logf("Uploaded: %s", s3URI) +} + +func (e *testEnv) loadCredentials(t *testing.T) *s3presign.AWSCredentials { + t.Helper() + + data, err := os.ReadFile(e.credFile) + if err != nil { + t.Fatalf("failed to read credentials file: %v", err) + } + + creds, err := s3presign.ParseAWSCredentials(data, e.profile) + if err != nil { + t.Fatalf("failed to parse credentials: %v", err) + } + + return creds +} + +func TestPresignedURLDownload(t *testing.T) { + env := setupEnv(t) + creds := env.loadCredentials(t) + + objectKey := "test-snapshot.db" + env.uploadObject(t, objectKey, testObjectContent) + + // Parse and presign + s3URL := fmt.Sprintf("s3://%s/%s", env.bucket, objectKey) + bucket, key, err := s3presign.ParseS3URL(s3URL) + if err != nil { + t.Fatalf("ParseS3URL failed: %v", err) + } + + presignedURL, err := s3presign.GeneratePresignedGetURL(s3presign.PresignOptions{ + Bucket: bucket, + Key: key, + Region: env.region, + AccessKeyID: creds.AccessKeyID, + SecretAccessKey: creds.SecretAccessKey, + SessionToken: creds.SessionToken, + Expiry: 15 * time.Minute, + }) + if err != nil { + t.Fatalf("GeneratePresignedGetURL failed: %v", err) + } + + t.Logf("Generated pre-signed URL (first 120 chars): %.120s...", presignedURL) + + if !strings.HasPrefix(presignedURL, "https://") { + t.Fatalf("pre-signed URL should start with https://, got: %s", presignedURL) + } + + // Download via HTTP GET + resp, err := http.Get(presignedURL) + if err != nil { + t.Fatalf("HTTP GET failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("expected HTTP 200, got %d: %s", resp.StatusCode, string(body)) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if string(body) != testObjectContent { + t.Errorf("content mismatch: got %q, want %q", string(body), testObjectContent) + } + + t.Log("Successfully downloaded object via pre-signed URL, content matches") +} + +func TestPresignedURLNestedKey(t *testing.T) { + env := setupEnv(t) + creds := env.loadCredentials(t) + + objectKey := "backups/my-backup/etcd-backup/snapshot.db" + env.uploadObject(t, objectKey, testObjectContent) + + bucket, key, err := s3presign.ParseS3URL(fmt.Sprintf("s3://%s/%s", env.bucket, objectKey)) + if err != nil { + t.Fatalf("ParseS3URL 
failed: %v", err) + } + + presignedURL, err := s3presign.GeneratePresignedGetURL(s3presign.PresignOptions{ + Bucket: bucket, + Key: key, + Region: env.region, + AccessKeyID: creds.AccessKeyID, + SecretAccessKey: creds.SecretAccessKey, + SessionToken: creds.SessionToken, + Expiry: 15 * time.Minute, + }) + if err != nil { + t.Fatalf("GeneratePresignedGetURL failed: %v", err) + } + + resp, err := http.Get(presignedURL) + if err != nil { + t.Fatalf("HTTP GET failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("expected HTTP 200, got %d: %s", resp.StatusCode, string(body)) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if string(body) != testObjectContent { + t.Errorf("content mismatch for nested key: got %q, want %q", string(body), testObjectContent) + } + + t.Log("Successfully downloaded nested-key object via pre-signed URL") +} + +func TestPresignedURLWithSessionToken(t *testing.T) { + env := setupEnv(t) + creds := env.loadCredentials(t) + + if creds.SessionToken == "" { + t.Skip("skipping: no session token in credentials (not STS)") + } + + objectKey := "sts-test-snapshot.db" + env.uploadObject(t, objectKey, testObjectContent) + + presignedURL, err := s3presign.GeneratePresignedGetURL(s3presign.PresignOptions{ + Bucket: env.bucket, + Key: objectKey, + Region: env.region, + AccessKeyID: creds.AccessKeyID, + SecretAccessKey: creds.SecretAccessKey, + SessionToken: creds.SessionToken, + Expiry: 15 * time.Minute, + }) + if err != nil { + t.Fatalf("GeneratePresignedGetURL with session token failed: %v", err) + } + + if !strings.Contains(presignedURL, "X-Amz-Security-Token") { + t.Fatal("pre-signed URL should contain X-Amz-Security-Token for STS credentials") + } + + resp, err := http.Get(presignedURL) + if err != nil { + t.Fatalf("HTTP GET failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("expected HTTP 200, got %d: %s", resp.StatusCode, string(body)) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if string(body) != testObjectContent { + t.Errorf("content mismatch: got %q, want %q", string(body), testObjectContent) + } + + t.Log("Successfully downloaded via pre-signed URL with STS session token") +} + +func TestExpiredPresignedURLFails(t *testing.T) { + env := setupEnv(t) + creds := env.loadCredentials(t) + + objectKey := "expiry-test-snapshot.db" + env.uploadObject(t, objectKey, testObjectContent) + + // Generate URL with 1-second expiry + presignedURL, err := s3presign.GeneratePresignedGetURL(s3presign.PresignOptions{ + Bucket: env.bucket, + Key: objectKey, + Region: env.region, + AccessKeyID: creds.AccessKeyID, + SecretAccessKey: creds.SecretAccessKey, + SessionToken: creds.SessionToken, + Expiry: 1 * time.Second, + }) + if err != nil { + t.Fatalf("GeneratePresignedGetURL failed: %v", err) + } + + // Wait for expiry + t.Log("Waiting for pre-signed URL to expire...") + time.Sleep(3 * time.Second) + + resp, err := http.Get(presignedURL) + if err != nil { + t.Fatalf("HTTP GET failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusOK { + t.Fatal("expected expired URL to fail, but got HTTP 200") + } + + t.Logf("Expired URL correctly returned HTTP %d", resp.StatusCode) +} From 07ca88916ca436b758756ce97568f7ef14b75995 Mon Sep 17 00:00:00 2001 From: Juan 
Manuel Parrilla Madrid Date: Mon, 13 Apr 2026 11:13:57 +0200 Subject: [PATCH 3/3] test(restore): refactor restore tests to table-driven format with full Execute coverage Add table-driven tests for presignS3URL and restore Execute flow covering etcd snapshot URL injection, including s3://, https://, and no-annotation cases. Co-Authored-By: Claude Opus 4.6 Signed-off-by: Juan Manuel Parrilla Madrid --- pkg/core/restore_test.go | 449 +++++++++++++++++++++++++++++---------- 1 file changed, 334 insertions(+), 115 deletions(-) diff --git a/pkg/core/restore_test.go b/pkg/core/restore_test.go index 553f2b43b..69884d8d5 100644 --- a/pkg/core/restore_test.go +++ b/pkg/core/restore_test.go @@ -4,14 +4,18 @@ import ( "context" "net/url" "os" + "strings" "testing" common "github.com/openshift/hypershift-oadp-plugin/pkg/common" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/sirupsen/logrus" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + veleroapiv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -27,45 +31,250 @@ aws_access_key_id = AKIAIOSFODNN7EXAMPLE aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY `) - bsl := &velerov1api.BackupStorageLocation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: "openshift-adp", - }, + defaultBSL := &velerov1api.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "openshift-adp"}, Spec: velerov1api.BackupStorageLocationSpec{ - Config: map[string]string{ - "region": "us-east-1", - }, + Config: map[string]string{"region": "us-east-1"}, Credential: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cloud-credentials", - }, - Key: "cloud", + LocalObjectReference: corev1.LocalObjectReference{Name: "cloud-credentials"}, + Key: "cloud", }, }, } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cloud-credentials", - Namespace: "openshift-adp", + defaultSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "cloud-credentials", Namespace: "openshift-adp"}, + Data: map[string][]byte{"cloud": credentialData}, + } + + defaultBackup := &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{Name: "test-backup", Namespace: "openshift-adp"}, + Spec: velerov1api.BackupSpec{StorageLocation: "default"}, + } + + origSAPath := common.DefaultK8sSAFilePath + nsDir := t.TempDir() + if err := os.WriteFile(nsDir+"/namespace", []byte("openshift-adp"), 0644); err != nil { + t.Fatalf("failed to write namespace file: %v", err) + } + common.SetK8sSAFilePath(nsDir) + t.Cleanup(func() { common.SetK8sSAFilePath(origSAPath) }) + + tests := []struct { + name string + setup func() (*RestorePlugin, *velerov1api.Backup) + s3URL string + wantErr bool + assert func(*testing.T, string) + }{ + { + name: "When presigning a valid s3 URL, it should produce a pre-signed HTTPS URL with all SigV4 params", + setup: func() (*RestorePlugin, *velerov1api.Backup) { + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(defaultBSL, defaultSecret).Build() + return &RestorePlugin{log: logrus.New(), ctx: context.Background(), client: client}, defaultBackup + }, + s3URL: "s3://my-bucket/path/to/snapshot.db", + assert: func(t *testing.T, result string) 
{ + parsed, err := url.Parse(result) + if err != nil { + t.Fatalf("failed to parse result URL: %v", err) + } + if parsed.Scheme != "https" { + t.Errorf("expected https scheme, got %s", parsed.Scheme) + } + for _, p := range []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Date", "X-Amz-Expires", "X-Amz-Signature", "X-Amz-SignedHeaders"} { + if parsed.Query().Get(p) == "" { + t.Errorf("missing required query param %s", p) + } + } + }, + }, + { + name: "When presigning an invalid URL, it should return error", + setup: func() (*RestorePlugin, *velerov1api.Backup) { + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(defaultBSL, defaultSecret).Build() + return &RestorePlugin{log: logrus.New(), ctx: context.Background(), client: client}, defaultBackup + }, + s3URL: "not-an-s3-url", + wantErr: true, + }, + { + name: "When BSL does not exist, it should return error", + setup: func() (*RestorePlugin, *velerov1api.Backup) { + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(defaultBSL, defaultSecret).Build() + backup := defaultBackup.DeepCopy() + backup.Spec.StorageLocation = "nonexistent" + return &RestorePlugin{log: logrus.New(), ctx: context.Background(), client: client}, backup + }, + s3URL: "s3://bucket/key", + wantErr: true, + }, + { + name: "When BSL has no credential, it should return error", + setup: func() (*RestorePlugin, *velerov1api.Backup) { + bsl := defaultBSL.DeepCopy() + bsl.Name = "no-cred-bsl" + bsl.Spec.Credential = nil + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(bsl).Build() + backup := defaultBackup.DeepCopy() + backup.Spec.StorageLocation = "no-cred-bsl" + return &RestorePlugin{log: logrus.New(), ctx: context.Background(), client: client}, backup + }, + s3URL: "s3://bucket/key", + wantErr: true, }, - Data: map[string][]byte{ - "cloud": credentialData, + { + name: "When credential secret has wrong key, it should return error", + setup: func() (*RestorePlugin, *velerov1api.Backup) { + wrongSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "wrong-key-secret", Namespace: "openshift-adp"}, + Data: map[string][]byte{"wrong-key": credentialData}, + } + bsl := defaultBSL.DeepCopy() + bsl.Name = "wrong-key-bsl" + bsl.Spec.Credential = &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "wrong-key-secret"}, + Key: "cloud", + } + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(bsl, wrongSecret).Build() + backup := defaultBackup.DeepCopy() + backup.Spec.StorageLocation = "wrong-key-bsl" + return &RestorePlugin{log: logrus.New(), ctx: context.Background(), client: client}, backup + }, + s3URL: "s3://bucket/key", + wantErr: true, + }, + { + name: "When URL is already https, it should return error since presignS3URL only handles s3:// scheme", + setup: func() (*RestorePlugin, *velerov1api.Backup) { + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(defaultBSL, defaultSecret).Build() + return &RestorePlugin{log: logrus.New(), ctx: context.Background(), client: client}, defaultBackup + }, + s3URL: "https://my-bucket.s3.us-east-1.amazonaws.com/path/to/snapshot.db?X-Amz-Signature=abc", + wantErr: true, + }, + { + name: "When BSL has custom endpoint, it should use that endpoint in the pre-signed URL", + setup: func() (*RestorePlugin, *velerov1api.Backup) { + bsl := defaultBSL.DeepCopy() + bsl.Name = "minio-bsl" + bsl.Spec.Config["s3Url"] = "https://minio.example.com" + bsl.Spec.Config["s3ForcePathStyle"] = "true" + client := 
fake.NewClientBuilder().WithScheme(scheme).WithObjects(bsl, defaultSecret).Build() + backup := defaultBackup.DeepCopy() + backup.Spec.StorageLocation = "minio-bsl" + return &RestorePlugin{log: logrus.New(), ctx: context.Background(), client: client}, backup + }, + s3URL: "s3://bucket/key", + assert: func(t *testing.T, result string) { + parsed, _ := url.Parse(result) + if parsed.Host != "minio.example.com" { + t.Errorf("expected minio.example.com host, got %s", parsed.Host) + } + }, }, } - backup := &velerov1api.Backup{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-backup", - Namespace: "openshift-adp", + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + plugin, backup := tt.setup() + result, err := plugin.presignS3URL(context.Background(), backup, tt.s3URL) + if tt.wantErr { + if err == nil { + t.Error("expected error but got nil") + } + return + } + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tt.assert != nil { + tt.assert(t, result) + } + }) + } +} + +func newHCUnstructured(name, namespace string, annotations map[string]string) *unstructured.Unstructured { + hc := &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "hypershift.openshift.io/v1beta1", + "kind": "HostedCluster", + "metadata": map[string]any{ + "name": name, + "namespace": namespace, + }, + "spec": map[string]any{ + "etcd": map[string]any{ + "managementType": "Managed", + "managed": map[string]any{ + "storage": map[string]any{ + "type": "PersistentVolume", + "persistentVolume": map[string]any{ + "size": "8Gi", + }, + }, + }, + }, + "platform": map[string]any{"type": "AWS"}, + "release": map[string]any{"image": "quay.io/openshift-release-dev/ocp-release:4.16.0-x86_64"}, + "networking": map[string]any{ + "clusterNetwork": []any{map[string]any{"cidr": "10.132.0.0/14"}}, + "serviceNetwork": []any{map[string]any{"cidr": "172.31.0.0/16"}}, + }, + "pullSecret": map[string]any{"name": "pull-secret"}, + "infraID": "test-infra", + "services": []any{}, + }, + }, + } + if annotations != nil { + hc.Object["metadata"].(map[string]any)["annotations"] = func() map[string]any { + m := make(map[string]any, len(annotations)) + for k, v := range annotations { + m[k] = v + } + return m + }() + } + return hc +} + +func TestRestoreExecuteSnapshotURL(t *testing.T) { + s := common.CustomScheme + + credentialData := []byte("[default]\naws_access_key_id = AKIAIOSFODNN7EXAMPLE\naws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\n") + + hcpCRD := &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "hostedcontrolplanes.hypershift.openshift.io"}, + } + + bsl := &velerov1api.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "openshift-adp"}, + Spec: velerov1api.BackupStorageLocationSpec{ + Config: map[string]string{"region": "us-east-1"}, + Credential: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "cloud-credentials"}, + Key: "cloud", + }, }, + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "cloud-credentials", Namespace: "openshift-adp"}, + Data: map[string][]byte{"cloud": credentialData}, + } + backup := &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{Name: "test-backup", Namespace: "openshift-adp"}, Spec: velerov1api.BackupSpec{ - StorageLocation: "default", + StorageLocation: "default", + IncludedNamespaces: []string{"clusters", "clusters-test"}, }, } + restore := &velerov1api.Restore{ + ObjectMeta: metav1.ObjectMeta{Name: "test-restore", Namespace: 
"openshift-adp"}, + Spec: velerov1api.RestoreSpec{BackupName: "test-backup"}, + } - // Override namespace resolution for tests origSAPath := common.DefaultK8sSAFilePath nsDir := t.TempDir() if err := os.WriteFile(nsDir+"/namespace", []byte("openshift-adp"), 0644); err != nil { @@ -74,96 +283,106 @@ aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY common.SetK8sSAFilePath(nsDir) t.Cleanup(func() { common.SetK8sSAFilePath(origSAPath) }) - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(bsl, secret). - Build() - - plugin := &RestorePlugin{ - log: logrus.New(), - ctx: context.Background(), - client: fakeClient, - } - - t.Run("valid s3 URL produces pre-signed HTTPS URL", func(t *testing.T) { - result, err := plugin.presignS3URL(context.Background(), backup, "s3://my-bucket/path/to/snapshot.db") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if result == "" { - t.Fatal("expected non-empty URL") - } - - parsed, err := url.Parse(result) - if err != nil { - t.Fatalf("failed to parse result URL: %v", err) - } - - if parsed.Scheme != "https" { - t.Errorf("expected https scheme, got %s", parsed.Scheme) - } - - requiredParams := []string{ - "X-Amz-Algorithm", - "X-Amz-Credential", - "X-Amz-Date", - "X-Amz-Expires", - "X-Amz-Signature", - "X-Amz-SignedHeaders", - } - for _, p := range requiredParams { - if parsed.Query().Get(p) == "" { - t.Errorf("missing required query param %s", p) + // extractRestoreSnapshotURL is a helper to navigate the unstructured HC output + extractRestoreSnapshotURL := func(output *veleroapiv1.RestoreItemActionExecuteOutput) ([]any, bool) { + spec := output.UpdatedItem.UnstructuredContent()["spec"].(map[string]any) + etcd := spec["etcd"].(map[string]any) + managed := etcd["managed"].(map[string]any) + storage := managed["storage"].(map[string]any) + urls, ok := storage["restoreSnapshotURL"].([]any) + return urls, ok + } + + tests := []struct { + name string + annotations map[string]string + wantErr bool + assert func(*testing.T, *veleroapiv1.RestoreItemActionExecuteOutput) + }{ + { + name: "When HC has etcd-snapshot-url annotation with s3 scheme, it should inject pre-signed restoreSnapshotURL", + annotations: map[string]string{ + common.EtcdSnapshotURLAnnotation: "s3://my-bucket/path/to/snapshot.db", + }, + assert: func(t *testing.T, output *veleroapiv1.RestoreItemActionExecuteOutput) { + urls, ok := extractRestoreSnapshotURL(output) + if !ok || len(urls) == 0 { + t.Fatal("expected restoreSnapshotURL to be set") + } + presignedURL, ok := urls[0].(string) + if !ok { + t.Fatal("expected restoreSnapshotURL[0] to be a string") + } + if !strings.HasPrefix(presignedURL, "https://") { + t.Errorf("expected pre-signed URL to start with https://, got %s", presignedURL) + } + if !strings.Contains(presignedURL, "X-Amz-Signature") { + t.Errorf("expected pre-signed URL to contain X-Amz-Signature, got %s", presignedURL) + } + }, + }, + { + name: "When HC has no etcd-snapshot-url annotation, it should not inject restoreSnapshotURL", + annotations: nil, + assert: func(t *testing.T, output *veleroapiv1.RestoreItemActionExecuteOutput) { + spec := output.UpdatedItem.UnstructuredContent()["spec"].(map[string]any) + etcd := spec["etcd"].(map[string]any) + managed := etcd["managed"].(map[string]any) + storage := managed["storage"].(map[string]any) + if _, exists := storage["restoreSnapshotURL"]; exists { + t.Error("expected restoreSnapshotURL to NOT be set when no annotation is present") + } + }, + }, + { + name: "When HC has https 
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			fakeClient := fake.NewClientBuilder().
+				WithScheme(s).
+				WithObjects(hcpCRD, bsl, secret, backup).
+				Build()
+
+			plugin := &RestorePlugin{
+				log:    logrus.New(),
+				ctx:    context.Background(),
+				client: fakeClient,
+			}
+
+			hc := newHCUnstructured("my-hc", "clusters", tt.annotations)
+
+			output, err := plugin.Execute(&veleroapiv1.RestoreItemActionExecuteInput{
+				Item:    hc,
+				Restore: restore,
+			})
+			if tt.wantErr {
+				if err == nil {
+					t.Error("expected error but got nil")
+				}
+				return
+			}
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
 			}
-		}
-	})
-
-	t.Run("invalid s3 URL returns error", func(t *testing.T) {
-		_, err := plugin.presignS3URL(context.Background(), backup, "not-an-s3-url")
-		if err == nil {
-			t.Error("expected error for invalid S3 URL")
-		}
-	})
-
-	t.Run("BSL not found returns error", func(t *testing.T) {
-		badBackup := backup.DeepCopy()
-		badBackup.Spec.StorageLocation = "nonexistent"
-
-		_, err := plugin.presignS3URL(context.Background(), badBackup, "s3://bucket/key")
-		if err == nil {
-			t.Error("expected error for nonexistent BSL")
-		}
-	})
-
-	t.Run("custom endpoint uses endpoint URL", func(t *testing.T) {
-		bslCustom := bsl.DeepCopy()
-		bslCustom.Name = "minio-bsl"
-		bslCustom.Spec.Config["s3Url"] = "https://minio.example.com"
-		bslCustom.Spec.Config["s3ForcePathStyle"] = "true"
-
-		fakeClientCustom := fake.NewClientBuilder().
-			WithScheme(scheme).
-			WithObjects(bslCustom, secret).
-			Build()
-
-		pluginCustom := &RestorePlugin{
-			log:    logrus.New(),
-			ctx:    context.Background(),
-			client: fakeClientCustom,
-		}
-
-		backupCustom := backup.DeepCopy()
-		backupCustom.Spec.StorageLocation = "minio-bsl"
-
-		result, err := pluginCustom.presignS3URL(context.Background(), backupCustom, "s3://bucket/key")
-		if err != nil {
-			t.Fatalf("unexpected error: %v", err)
-		}
-
-		parsed, _ := url.Parse(result)
-		if parsed.Host != "minio.example.com" {
-			t.Errorf("expected minio.example.com host, got %s", parsed.Host)
-		}
-	})
+			if tt.assert != nil {
+				tt.assert(t, output)
+			}
+		})
+	}
 }